Compare commits
No commits in common. "419994ee327b0700d8f4c5834341135d571ae4e9" and "1cf0364268452ccc8470d082de3abd87eda337b1" have entirely different histories.
419994ee32...1cf0364268
@@ -1,18 +0,0 @@
-# Generated by Django 3.2.18 on 2023-02-16 22:38
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('ircbot', '0018_ircserver_replace_irc_control_with_markdown'),
-    ]
-
-    operations = [
-        migrations.AddField(
-            model_name='ircchannel',
-            name='discord_bridge',
-            field=models.CharField(blank=True, default='', max_length=32),
-        ),
-    ]
@@ -104,8 +104,6 @@ class IrcChannel(models.Model):
 
     markov_learn_from_channel = models.BooleanField(default=True)
 
-    discord_bridge = models.CharField(default='', max_length=32, blank=True)
-
     class Meta:
         """Settings for the model."""
 
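Note: the deleted migration above and the discord_bridge field removed from IrcChannel here describe the same schema change, so the two hunks go together. As a rough usage sketch only (the ircbot.models import path, the name field, and the empty-string-means-unbridged convention are assumptions, not taken from this diff):

# Hypothetical sketch; IrcChannel's import path and its name field are assumed.
from ircbot.models import IrcChannel

def discord_bridge_for(channel_name):
    """Return the configured Discord bridge for an IRC channel, or None."""
    channel = IrcChannel.objects.filter(name=channel_name).first()
    if channel is None or not channel.discord_bridge:
        # blank=True / default='' appears to mean "no bridge configured"
        return None
    return channel.discord_bridge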
@@ -1,16 +1,17 @@
 """Provide methods for manipulating markov chain processing."""
 import logging
-from random import SystemRandom as sysrand
+import random
 
 from django.db.models import Sum
-
 from markov.models import MarkovContext, MarkovState, MarkovTarget
 
-log = logging.getLogger(__name__)
+
+log = logging.getLogger('markov.lib')
 
 
 def generate_line(context, topics=None, min_words=15, max_words=30, sentence_bias=2, max_tries=5):
-    """Combine multiple sentences together into a coherent sentence."""
+    """String multiple sentences together into a coherent sentence."""
+
     tries = 0
     line = []
     min_words_per_sentence = min_words / sentence_bias
@@ -22,7 +23,7 @@ def generate_line(context, topics=None, min_words=15, max_words=30, sentence_bias=2, max_tries=5):
         else:
             if len(line) > 0:
                 if line[-1][-1] not in [',', '.', '!', '?', ':']:
-                    line[-1] += sysrand.choice(['?', '.', '!'])
+                    line[-1] += random.choice(['?', '.', '!'])
 
         tries += 1
 
@@ -32,6 +33,7 @@ def generate_line(context, topics=None, min_words=15, max_words=30, sentence_bias=2, max_tries=5):
 
 def generate_longish_sentence(context, topics=None, min_words=15, max_words=30, max_tries=100):
     """Generate a Markov chain, but throw away the short ones unless we get desperate."""
+
     sent = ""
     tries = 0
     while tries < max_tries:
@@ -50,19 +52,20 @@ def generate_longish_sentence(context, topics=None, min_words=15, max_words=30, max_tries=100):
 
 def generate_sentence(context, topics=None, min_words=15, max_words=30):
     """Generate a Markov chain."""
+
     words = []
     # if we have topics, try to work from it and work backwards
     if topics:
-        topic_word = sysrand.choice(topics)
+        topic_word = random.choice(topics)
         topics.remove(topic_word)
-        log.debug("looking for topic '%s'", topic_word)
+        log.debug("looking for topic '{0:s}'".format(topic_word))
         new_states = MarkovState.objects.filter(context=context, v=topic_word)
 
         if len(new_states) > 0:
-            log.debug("found '%s', starting backwards", topic_word)
+            log.debug("found '{0:s}', starting backwards".format(topic_word))
             words.insert(0, topic_word)
             while len(words) <= max_words and words[0] != MarkovState._start2:
-                log.debug("looking backwards for '%s'", words[0])
+                log.debug("looking backwards for '{0:s}'".format(words[0]))
                 new_states = MarkovState.objects.filter(context=context, v=words[0])
                 # if we find a start, use it
                 if MarkovState._start2 in new_states:
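The recurring log.debug() rewrites in these hunks trade lazy %-style arguments for eager str.format() calls. A minimal, self-contained sketch of the difference (logger name chosen for illustration only, not taken from this diff):

import logging

log = logging.getLogger(__name__)
word = "example"

# Lazy formatting: the message is only interpolated if a handler will emit it.
log.debug("looking for topic '%s'", word)

# Eager formatting: the string is always built, even when DEBUG is disabled.
log.debug("looking for topic '{0:s}'".format(word))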
@@ -84,7 +87,7 @@ def generate_sentence(context, topics=None, min_words=15, max_words=30):
 
     i = len(words)
     while words[-1] != MarkovState._stop:
-        log.debug("looking for '%s','%s'", words[i-2], words[i-1])
+        log.debug("looking for '{0:s}','{1:s}'".format(words[i-2], words[i-1]))
         new_states = MarkovState.objects.filter(context=context, k1=words[i-2], k2=words[i-1])
         log.debug("states retrieved")
 
@@ -100,7 +103,7 @@ def generate_sentence(context, topics=None, min_words=15, max_words=30):
             words.append(MarkovState._stop)
         elif len(target_hits) > 0:
             # if there's a target word in the states, pick it
-            target_hit = sysrand.choice(target_hits)
+            target_hit = random.choice(target_hits)
             log.debug("found a topic hit %s, using it", target_hit)
             topics.remove(target_hit)
             words.append(target_hit)
@@ -126,6 +129,7 @@ def generate_sentence(context, topics=None, min_words=15, max_words=30):
 
 def get_or_create_target_context(target_name):
     """Return the context for a provided nick/channel, creating missing ones."""
+
     target_name = target_name.lower()
 
     # find the stuff, or create it
@@ -152,6 +156,7 @@ def get_or_create_target_context(target_name):
 
 def get_word_out_of_states(states, backwards=False):
     """Pick one random word out of the given states."""
+
     # work around possible broken data, where a k1,k2 should have a value but doesn't
     if len(states) == 0:
         states = MarkovState.objects.filter(v=MarkovState._stop)
@@ -163,9 +168,9 @@ def get_word_out_of_states(states, backwards=False):
         # this being None probably means there's no data for this context
         raise ValueError("no markov states to generate from")
 
-    hit = sysrand.randint(0, count_sum)
+    hit = random.randint(0, count_sum)
 
-    log.debug("sum: %s hit: %s", count_sum, hit)
+    log.debug("sum: {0:d} hit: {1:d}".format(count_sum, hit))
 
     states_itr = states.iterator()
     for state in states_itr:
@@ -178,12 +183,13 @@ def get_word_out_of_states(states, backwards=False):
 
             break
 
-    log.debug("found '%s'", new_word)
+    log.debug("found '{0:s}'".format(new_word))
     return new_word
 
 
 def learn_line(line, context):
     """Create a bunch of MarkovStates for a given line of text."""
+
     log.debug("learning %s...", line[:40])
 
     words = line.split()
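get_word_out_of_states() in the two hunks above selects a word with probability proportional to its count: it sums the counts, draws a random number up to that sum, and walks the states until the running total reaches the draw. A minimal sketch of that selection over plain tuples (the names and the tuple shape are illustrative, not the ORM layer from this diff):

import random

def pick_weighted(states):
    """Pick a word from (word, count) pairs, weighted by count."""
    if not states:
        raise ValueError("no states to pick from")
    count_sum = sum(count for _word, count in states)
    hit = random.randint(0, count_sum)
    running = 0
    for word, count in states:
        running += count
        if running >= hit:
            return word
    return states[-1][0]  # guard; the loop always returns for non-negative counts

# 'fish' should come back roughly three times as often as 'cat'.
print(pick_weighted([('fish', 30), ('cat', 10)]))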
@@ -194,7 +200,7 @@ def learn_line(line, context):
         return
 
     for i, word in enumerate(words):
-        log.debug("'%s','%s' -> '%s'", words[i], words[i+1], words[i+2])
+        log.debug("'{0:s}','{1:s}' -> '{2:s}'".format(words[i], words[i+1], words[i+2]))
         state, created = MarkovState.objects.get_or_create(context=context,
                                                            k1=words[i],
                                                            k2=words[i+1],
@@ -1,22 +1,30 @@
-"""Save brain pieces as markov chains for chaining."""
+"""
+markov/models.py --- save brain pieces for chaining
+
+"""
+
 import logging
 
 from django.db import models
 
-log = logging.getLogger(__name__)
+
+log = logging.getLogger('markov.models')
 
 
 class MarkovContext(models.Model):
+
     """Define contexts for Markov chains."""
 
     name = models.CharField(max_length=200, unique=True)
 
     def __str__(self):
-        """Provide string representation."""
+        """String representation."""
+
         return "{0:s}".format(self.name)
 
 
 class MarkovTarget(models.Model):
+
     """Define IRC targets that relate to a context, and can occasionally be talked to."""
 
     name = models.CharField(max_length=200, unique=True)
@@ -25,11 +33,13 @@ class MarkovTarget(models.Model):
     chatter_chance = models.IntegerField(default=0)
 
     def __str__(self):
-        """Provide string representation."""
+        """String representation."""
+
         return "{0:s} -> {1:s}".format(self.name, self.context.name)
 
 
 class MarkovState(models.Model):
+
     """One element in a Markov chain, some text or something."""
 
     _start1 = '__start1'
@@ -44,8 +54,6 @@ class MarkovState(models.Model):
     context = models.ForeignKey(MarkovContext, on_delete=models.CASCADE)
 
     class Meta:
-        """Options for the model itself."""
-
         index_together = [
             ['context', 'k1', 'k2'],
             ['context', 'v'],
@@ -57,5 +65,6 @@ class MarkovState(models.Model):
         unique_together = ('context', 'k1', 'k2', 'v')
 
     def __str__(self):
-        """Provide string representation."""
+        """String representation."""
+
         return "{0:s},{1:s} -> {2:s} (count: {3:d})".format(self.k1, self.k2, self.v, self.count)
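For orientation, MarkovState stores a second-order chain: each row maps a pair of preceding words (k1, k2) to a following word v with an occurrence count, unique per (context, k1, k2, v). A minimal, ORM-free sketch of how a learned line becomes those triples (the padding with start/stop sentinels follows the apparent intent of learn_line above; only '__start1' is spelled out in this diff, so the other two sentinel values are assumptions):

# Sentinel values for START2 and STOP are assumed by analogy with _start1.
START1, START2, STOP = '__start1', '__start2', '__stop'

def line_to_states(line):
    """Split a line into (k1, k2, v) triples, padded with sentinel tokens."""
    words = [START1, START2] + line.split() + [STOP]
    return [(words[i], words[i + 1], words[i + 2]) for i in range(len(words) - 2)]

# Each triple would bump the count on its corresponding MarkovState row.
print(line_to_states("my hovercraft is full of eels"))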