2011-01-18 22:30:59 -06:00
|
|
|
"""
|
|
|
|
Markov - Chatterbot via Markov chains for IRC
|
|
|
|
Copyright (C) 2010 Brian S. Stephan
|
|
|
|
|
|
|
|
This program is free software: you can redistribute it and/or modify
|
|
|
|
it under the terms of the GNU General Public License as published by
|
|
|
|
the Free Software Foundation, either version 3 of the License, or
|
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
"""
|
|
|
|
|
2011-05-01 10:31:20 -05:00
|
|
|
from datetime import datetime
|
2011-01-18 22:30:59 -06:00
|
|
|
import random
|
|
|
|
import re
|
2011-06-20 21:18:55 -05:00
|
|
|
import thread
|
|
|
|
import time
|
2011-01-18 22:30:59 -06:00
|
|
|
|
2012-07-27 20:38:45 -05:00
|
|
|
from dateutil.relativedelta import *
|
2012-07-27 02:18:01 -05:00
|
|
|
import MySQLdb as mdb
|
|
|
|
|
2011-01-18 22:30:59 -06:00
|
|
|
from extlib import irclib
|
|
|
|
|
|
|
|
from Module import Module
|
|
|
|
|
|
|
|
class Markov(Module):

    """
    Create a chatterbot very similar to a MegaHAL, but simpler and
    implemented in pure Python. Proof of concept code from Ape.

    Learns from pubmsg/privmsg IRC events, stores two-word-key Markov
    chains in MySQL (one chain table partitioned by per-target "contexts"),
    and generates replies on command, when addressed, or randomly.

    Ape wrote: based on this:
    http://uswaretech.com/blog/2009/06/pseudo-random-text-markov-chains-python/
    and this:
    http://code.activestate.com/recipes/194364-the-markov-chain-algorithm/
    """
|
def __init__(self, irc, config, server):
|
|
|
|
"""Create the Markov chainer, and learn text from a file if available."""
|
|
|
|
|
|
|
|
# set up some keywords for use in the chains --- don't change these
|
|
|
|
# once you've created a brain
|
|
|
|
self.start1 = '__start1'
|
|
|
|
self.start2 = '__start2'
|
|
|
|
self.stop = '__stop'
|
|
|
|
|
|
|
|
# set up regexes, for replying to specific stuff
|
2011-01-19 10:20:20 -06:00
|
|
|
learnpattern = '^!markov\s+learn\s+(.*)$'
|
2011-01-25 20:25:15 -06:00
|
|
|
replypattern = '^!markov\s+reply(\s+min=(\d+))?(\s+max=(\d+))?(\s+(.*)$|$)'
|
2011-01-18 22:30:59 -06:00
|
|
|
|
|
|
|
self.learnre = re.compile(learnpattern)
|
|
|
|
self.replyre = re.compile(replypattern)
|
|
|
|
|
2011-04-30 15:43:59 -05:00
|
|
|
self.shut_up = False
|
2011-05-01 10:31:20 -05:00
|
|
|
self.lines_seen = []
|
2011-04-30 15:43:59 -05:00
|
|
|
|
2011-02-24 20:39:32 -06:00
|
|
|
Module.__init__(self, irc, config, server)
|
|
|
|
|
2011-06-20 21:18:55 -05:00
|
|
|
self.next_shut_up_check = 0
|
2011-06-20 22:49:25 -05:00
|
|
|
self.next_chatter_check = 0
|
|
|
|
self.connection = None
|
2011-06-20 21:18:55 -05:00
|
|
|
thread.start_new_thread(self.thread_do, ())
|
|
|
|
|
2012-04-05 21:24:41 -05:00
|
|
|
irc.xmlrpc_register_function(self._generate_line, "markov_generate_line")
|
|
|
|
|
2011-02-24 20:39:32 -06:00
|
|
|
def db_init(self):
|
|
|
|
"""Create the markov chain table."""
|
|
|
|
|
|
|
|
version = self.db_module_registered(self.__class__.__name__)
|
2012-07-27 02:18:01 -05:00
|
|
|
if version == None:
|
2011-02-24 20:39:32 -06:00
|
|
|
db = self.get_db()
|
|
|
|
try:
|
2012-07-27 02:18:01 -05:00
|
|
|
version = 1
|
|
|
|
cur = db.cursor(mdb.cursors.DictCursor)
|
|
|
|
cur.execute('''
|
2012-02-28 23:23:14 -06:00
|
|
|
CREATE TABLE markov_chatter_target (
|
2012-07-27 02:18:01 -05:00
|
|
|
id SERIAL,
|
|
|
|
target VARCHAR(256) NOT NULL,
|
2012-02-28 23:23:14 -06:00
|
|
|
chance INTEGER NOT NULL DEFAULT 99999
|
2012-07-27 14:57:41 -05:00
|
|
|
) ENGINE=InnoDB CHARACTER SET utf8 COLLATE utf8_bin
|
2012-07-27 02:18:01 -05:00
|
|
|
''')
|
|
|
|
cur.execute('''
|
2011-04-23 16:07:32 -05:00
|
|
|
CREATE TABLE markov_context (
|
2012-07-27 02:18:01 -05:00
|
|
|
id SERIAL,
|
|
|
|
context VARCHAR(256) NOT NULL
|
2012-07-27 14:57:41 -05:00
|
|
|
) ENGINE=InnoDB CHARACTER SET utf8 COLLATE utf8_bin
|
2012-07-27 02:18:01 -05:00
|
|
|
''')
|
|
|
|
cur.execute('''
|
2011-04-23 16:07:32 -05:00
|
|
|
CREATE TABLE markov_target_to_context_map (
|
2012-07-27 02:18:01 -05:00
|
|
|
id SERIAL,
|
|
|
|
target VARCHAR(256) NOT NULL,
|
|
|
|
context_id BIGINT(20) UNSIGNED NOT NULL,
|
2011-04-23 16:07:32 -05:00
|
|
|
FOREIGN KEY(context_id) REFERENCES markov_context(id)
|
2012-07-27 14:57:41 -05:00
|
|
|
) ENGINE=InnoDB CHARACTER SET utf8 COLLATE utf8_bin
|
2012-07-27 02:18:01 -05:00
|
|
|
''')
|
|
|
|
cur.execute('''
|
2012-02-28 23:23:14 -06:00
|
|
|
CREATE TABLE markov_chain (
|
2012-07-27 02:18:01 -05:00
|
|
|
id SERIAL,
|
|
|
|
k1 VARCHAR(128) NOT NULL,
|
|
|
|
k2 VARCHAR(128) NOT NULL,
|
|
|
|
v VARCHAR(128) NOT NULL,
|
|
|
|
context_id BIGINT(20) UNSIGNED NOT NULL,
|
2012-02-28 23:23:14 -06:00
|
|
|
FOREIGN KEY(context_id) REFERENCES markov_context(id)
|
2012-07-27 14:57:41 -05:00
|
|
|
) ENGINE=InnoDB CHARACTER SET utf8 COLLATE utf8_bin
|
2012-07-27 02:18:01 -05:00
|
|
|
''')
|
|
|
|
cur.execute('''
|
2012-02-28 23:23:14 -06:00
|
|
|
CREATE INDEX markov_chain_keys_and_context_id_index
|
|
|
|
ON markov_chain (k1, k2, context_id)''')
|
|
|
|
|
2012-07-27 02:18:01 -05:00
|
|
|
cur.execute('''
|
2012-02-28 23:23:14 -06:00
|
|
|
CREATE INDEX markov_chain_value_and_context_id_index
|
|
|
|
ON markov_chain (v, context_id)''')
|
|
|
|
|
2011-10-16 21:13:27 -05:00
|
|
|
db.commit()
|
|
|
|
self.db_register_module_version(self.__class__.__name__, version)
|
2012-07-27 02:18:01 -05:00
|
|
|
except mdb.Error as e:
|
2011-10-16 21:13:27 -05:00
|
|
|
db.rollback()
|
2012-07-27 02:18:01 -05:00
|
|
|
self.log.error("database error trying to create tables")
|
|
|
|
self.log.exception(e)
|
2011-10-16 21:13:27 -05:00
|
|
|
raise
|
2012-07-27 02:18:01 -05:00
|
|
|
finally: cur.close()
|
2011-01-18 22:30:59 -06:00
|
|
|
|
rewrite recursion/alias code for the 500th time.
more of a moving of the code, actually, it now exists in (an overridden)
_handle_event, so that recursions happen against irc events directly,
rather than an already partially interpreted object.
with this change, modules don't need to implement do() nor do we have a
need for the internal_bus, which was doing an additional walk of the
modules after the irc event was already handled and turned into text. now
the core event handler does the recursion scans.
to support this, we bring back the old replypath trick and use it again,
so we know when to send a privmsg reply and when to return text so that
it may be chained in recursion. this feels old hat by now, but if you
haven't been following along, you should really look at the diff.
that's the meat of the change. the rest is updating modules to use
self.reply() and reimplementing (un)register_handlers where appropriate
2011-02-17 01:08:45 -06:00
|
|
|
def register_handlers(self):
|
2011-01-18 22:30:59 -06:00
|
|
|
"""Handle pubmsg/privmsg, to learn and/or reply to IRC events."""
|
|
|
|
|
rewrite recursion/alias code for the 500th time.
more of a moving of the code, actually, it now exists in (an overridden)
_handle_event, so that recursions happen against irc events directly,
rather than an already partially interpreted object.
with this change, modules don't need to implement do() nor do we have a
need for the internal_bus, which was doing an additional walk of the
modules after the irc event was already handled and turned into text. now
the core event handler does the recursion scans.
to support this, we bring back the old replypath trick and use it again,
so we know when to send a privmsg reply and when to return text so that
it may be chained in recursion. this feels old hat by now, but if you
haven't been following along, you should really look at the diff.
that's the meat of the change. the rest is updating modules to use
self.reply() and reimplementing (un)register_handlers where appropriate
2011-02-17 01:08:45 -06:00
|
|
|
self.server.add_global_handler('pubmsg', self.on_pub_or_privmsg, self.priority())
|
|
|
|
self.server.add_global_handler('privmsg', self.on_pub_or_privmsg, self.priority())
|
2011-01-18 22:30:59 -06:00
|
|
|
self.server.add_global_handler('pubmsg', self.learn_from_irc_event)
|
|
|
|
self.server.add_global_handler('privmsg', self.learn_from_irc_event)
|
|
|
|
|
|
|
|
def unregister_handlers(self):
|
rewrite recursion/alias code for the 500th time.
more of a moving of the code, actually, it now exists in (an overridden)
_handle_event, so that recursions happen against irc events directly,
rather than an already partially interpreted object.
with this change, modules don't need to implement do() nor do we have a
need for the internal_bus, which was doing an additional walk of the
modules after the irc event was already handled and turned into text. now
the core event handler does the recursion scans.
to support this, we bring back the old replypath trick and use it again,
so we know when to send a privmsg reply and when to return text so that
it may be chained in recursion. this feels old hat by now, but if you
haven't been following along, you should really look at the diff.
that's the meat of the change. the rest is updating modules to use
self.reply() and reimplementing (un)register_handlers where appropriate
2011-02-17 01:08:45 -06:00
|
|
|
self.server.remove_global_handler('pubmsg', self.on_pub_or_privmsg)
|
|
|
|
self.server.remove_global_handler('privmsg', self.on_pub_or_privmsg)
|
2011-01-18 22:30:59 -06:00
|
|
|
self.server.remove_global_handler('pubmsg', self.learn_from_irc_event)
|
|
|
|
self.server.remove_global_handler('privmsg', self.learn_from_irc_event)
|
|
|
|
|
|
|
|
def learn_from_irc_event(self, connection, event):
|
|
|
|
"""Learn from IRC events."""
|
|
|
|
|
|
|
|
what = ''.join(event.arguments()[0])
|
2011-04-22 19:40:36 -05:00
|
|
|
my_nick = connection.get_nickname()
|
|
|
|
what = re.sub('^' + my_nick + '[:,]\s+', '', what)
|
2011-04-23 16:07:32 -05:00
|
|
|
target = event.target()
|
2011-05-01 10:31:20 -05:00
|
|
|
nick = irclib.nm_to_n(event.source())
|
|
|
|
|
2012-03-19 00:12:29 -05:00
|
|
|
if not irclib.is_channel(target):
|
|
|
|
target = nick
|
|
|
|
|
2011-05-01 10:31:20 -05:00
|
|
|
self.lines_seen.append((nick, datetime.now()))
|
|
|
|
self.connection = connection
|
2011-01-18 22:30:59 -06:00
|
|
|
|
|
|
|
# don't learn from commands
|
2012-03-19 00:12:29 -05:00
|
|
|
if self.learnre.search(what) or self.replyre.search(what):
|
2011-01-18 22:30:59 -06:00
|
|
|
return
|
|
|
|
|
2012-03-29 20:07:32 -05:00
|
|
|
self._learn_line(what, target, event)
|
2011-01-18 22:30:59 -06:00
|
|
|
|
|
|
|
def do(self, connection, event, nick, userhost, what, admin_unlocked):
|
|
|
|
"""Handle commands and inputs."""
|
|
|
|
|
2011-06-14 22:10:57 -05:00
|
|
|
target = event.target()
|
|
|
|
|
2012-03-19 00:12:29 -05:00
|
|
|
if self.learnre.search(what):
|
2012-02-28 23:23:14 -06:00
|
|
|
return self.reply(connection, event, self.markov_learn(connection, event, nick,
|
|
|
|
userhost, what, admin_unlocked))
|
2011-04-30 15:43:59 -05:00
|
|
|
elif self.replyre.search(what) and not self.shut_up:
|
2012-02-28 23:23:14 -06:00
|
|
|
return self.reply(connection, event, self.markov_reply(connection, event, nick,
|
|
|
|
userhost, what, admin_unlocked))
|
2011-01-18 22:30:59 -06:00
|
|
|
|
2011-04-30 15:43:59 -05:00
|
|
|
if not self.shut_up:
|
|
|
|
# not a command, so see if i'm being mentioned
|
|
|
|
if re.search(connection.get_nickname(), what, re.IGNORECASE) is not None:
|
|
|
|
addressed_pattern = '^' + connection.get_nickname() + '[:,]\s+(.*)'
|
|
|
|
addressed_re = re.compile(addressed_pattern)
|
|
|
|
if addressed_re.match(what):
|
|
|
|
# i was addressed directly, so respond, addressing the speaker
|
2011-05-01 10:31:20 -05:00
|
|
|
self.lines_seen.append(('.self.said.', datetime.now()))
|
2012-02-28 23:23:14 -06:00
|
|
|
return self.reply(connection, event, '{0:s}: {1:s}'.format(nick,
|
|
|
|
self._generate_line(target, line=addressed_re.match(what).group(1))))
|
2011-04-30 15:43:59 -05:00
|
|
|
else:
|
|
|
|
# i wasn't addressed directly, so just respond
|
2011-05-01 10:31:20 -05:00
|
|
|
self.lines_seen.append(('.self.said.', datetime.now()))
|
2011-06-15 12:29:18 -05:00
|
|
|
return self.reply(connection, event, '{0:s}'.format(self._generate_line(target, line=what)))
|
2011-01-18 22:30:59 -06:00
|
|
|
|
|
|
|
def markov_learn(self, connection, event, nick, userhost, what, admin_unlocked):
|
|
|
|
"""Learn one line, as provided to the command."""
|
|
|
|
|
2011-04-23 16:07:32 -05:00
|
|
|
target = event.target()
|
2011-01-18 22:30:59 -06:00
|
|
|
match = self.learnre.search(what)
|
|
|
|
if match:
|
|
|
|
line = match.group(1)
|
2012-03-29 20:07:32 -05:00
|
|
|
self._learn_line(line, target, event)
|
2011-01-18 22:30:59 -06:00
|
|
|
|
2011-01-24 16:51:05 -06:00
|
|
|
# return what was learned, for weird chaining purposes
|
|
|
|
return line
|
|
|
|
|
2011-01-18 22:30:59 -06:00
|
|
|
def markov_reply(self, connection, event, nick, userhost, what, admin_unlocked):
|
|
|
|
"""Generate a reply to one line, without learning it."""
|
|
|
|
|
2011-06-14 22:10:57 -05:00
|
|
|
target = event.target()
|
2011-01-18 22:30:59 -06:00
|
|
|
match = self.replyre.search(what)
|
|
|
|
if match:
|
2011-01-25 20:25:15 -06:00
|
|
|
min_size = 15
|
|
|
|
max_size = 100
|
|
|
|
|
2011-01-18 22:30:59 -06:00
|
|
|
if match.group(2):
|
2011-01-25 20:25:15 -06:00
|
|
|
min_size = int(match.group(2))
|
|
|
|
if match.group(4):
|
|
|
|
max_size = int(match.group(4))
|
|
|
|
|
|
|
|
if match.group(5) != '':
|
|
|
|
line = match.group(6)
|
2011-05-01 10:31:20 -05:00
|
|
|
self.lines_seen.append(('.self.said.', datetime.now()))
|
2011-06-15 12:29:18 -05:00
|
|
|
return self._generate_line(target, line=line, min_size=min_size, max_size=max_size)
|
2011-01-18 22:30:59 -06:00
|
|
|
else:
|
2011-05-01 10:31:20 -05:00
|
|
|
self.lines_seen.append(('.self.said.', datetime.now()))
|
2011-06-15 12:29:18 -05:00
|
|
|
return self._generate_line(target, min_size=min_size, max_size=max_size)
|
2011-01-18 22:30:59 -06:00
|
|
|
|
2011-06-20 21:18:55 -05:00
|
|
|
def thread_do(self):
|
2011-05-01 10:31:20 -05:00
|
|
|
"""Do various things."""
|
|
|
|
|
2011-06-20 21:18:55 -05:00
|
|
|
while not self.is_shutdown:
|
|
|
|
self._do_shut_up_checks()
|
2011-06-20 22:49:25 -05:00
|
|
|
self._do_random_chatter_check()
|
2011-06-20 21:18:55 -05:00
|
|
|
time.sleep(1)
|
2011-05-01 10:31:20 -05:00
|
|
|
|
2011-06-20 22:49:25 -05:00
|
|
|
    def _do_random_chatter_check(self):
        """Randomly say something to a channel.

        Runs at most once every 10 minutes; for each configured chatter
        target, rolls a 1-in-chance die and speaks a generated line on a
        hit.
        """

        # don't immediately potentially chatter, let the bot
        # join channels first
        if self.next_chatter_check == 0:
            self.next_chatter_check = time.time() + 600

        if self.next_chatter_check < time.time():
            # reschedule first, so a slow/raising body can't re-trigger early
            self.next_chatter_check = time.time() + 600

            if self.connection is None:
                # i haven't seen any text yet...
                return

            targets = self._get_chatter_targets()
            for t in targets:
                # chance <= 0 disables chatter for that target
                if t['chance'] > 0:
                    a = random.randint(1, t['chance'])
                    if a == 1:
                        self.sendmsg(self.connection, t['target'], self._generate_line(t['target']))
|
2011-05-01 10:31:20 -05:00
|
|
|
    def _do_shut_up_checks(self):
        """Check to see if we've been talking too much, and shut up if so.

        Every 30 seconds: clear the shut-up flag, then re-set it (and
        announce to all chatter targets) if 8+ of the recently seen lines
        in the last ~30 seconds were our own ('.self.said.').
        """

        if self.next_shut_up_check < time.time():
            self.shut_up = False
            self.next_shut_up_check = time.time() + 30

            last_30_sec_lines = []

            for (nick,then) in self.lines_seen:
                rdelta = relativedelta(datetime.now(), then)
                # keep only entries from (roughly) the last 30 seconds
                if (rdelta.years == 0 and rdelta.months == 0 and rdelta.days == 0 and
                    rdelta.hours == 0 and rdelta.minutes == 0 and rdelta.seconds <= 29):
                    last_30_sec_lines.append((nick,then))

            if len(last_30_sec_lines) >= 8:
                # Python 2 tuple-parameter lambda; counts our own lines
                lines_i_said = len(filter(lambda (a,b): a == '.self.said.', last_30_sec_lines))
                if lines_i_said >= 8:
                    self.shut_up = True
                    targets = self._get_chatter_targets()
                    for t in targets:
                        self.sendmsg(self.connection, t['target'],
                                     'shutting up for 30 seconds due to last 30 seconds of activity')
|
2012-03-29 20:07:32 -05:00
|
|
|
def _learn_line(self, line, target, event):
|
2011-01-18 22:30:59 -06:00
|
|
|
"""Create Markov chains from the provided line."""
|
|
|
|
|
|
|
|
# set up the head of the chain
|
2011-02-24 20:39:32 -06:00
|
|
|
k1 = self.start1
|
|
|
|
k2 = self.start2
|
|
|
|
|
2012-02-28 23:23:14 -06:00
|
|
|
context_id = self._get_context_id_for_target(target)
|
2011-04-23 16:07:32 -05:00
|
|
|
|
2012-03-29 20:07:32 -05:00
|
|
|
# don't learn recursion
|
|
|
|
if not event._recursing:
|
2011-06-16 21:25:22 -05:00
|
|
|
words = line.split()
|
|
|
|
if len(words) <= 0:
|
|
|
|
return line
|
|
|
|
|
2012-07-27 02:18:01 -05:00
|
|
|
db = self.get_db()
|
2011-06-16 21:25:22 -05:00
|
|
|
try:
|
2012-07-27 02:18:01 -05:00
|
|
|
cur = db.cursor(mdb.cursors.DictCursor)
|
|
|
|
statement = 'INSERT INTO markov_chain (k1, k2, v, context_id) VALUES (%s, %s, %s, %s)'
|
2011-06-16 21:25:22 -05:00
|
|
|
for word in words:
|
2012-07-27 16:34:57 -05:00
|
|
|
cur.execute(statement, (k1, k2, word, context_id))
|
2011-06-16 21:25:22 -05:00
|
|
|
k1, k2 = k2, word
|
2012-07-27 16:34:57 -05:00
|
|
|
cur.execute(statement, (k1, k2, self.stop, context_id))
|
2011-06-16 21:25:22 -05:00
|
|
|
|
|
|
|
db.commit()
|
2012-07-27 02:18:01 -05:00
|
|
|
except mdb.Error as e:
|
2011-06-16 21:25:22 -05:00
|
|
|
db.rollback()
|
2012-07-27 02:18:01 -05:00
|
|
|
self.log.error("database error learning line")
|
|
|
|
self.log.exception(e)
|
2011-06-16 21:25:22 -05:00
|
|
|
raise
|
2012-07-27 02:18:01 -05:00
|
|
|
finally: cur.close()
|
2011-01-18 22:30:59 -06:00
|
|
|
|
2011-06-15 12:29:18 -05:00
|
|
|
    def _generate_line(self, target, line='', min_size=15, max_size=100):
        """
        Create a line, optionally using some text in a seed as a point in the chain.

        Keyword arguments:
        target - the target to retrieve the context for (i.e. a channel or nick)
        line - the line to reply to, by picking a random word and seeding with it
        min_size - the minimum desired size in words. not guaranteed
        max_size - the maximum desired size in words. not guaranteed
        """

        # if the limit is too low, there's nothing to do
        if (max_size <= 3):
            raise Exception("max_size is too small: %d" % max_size)

        # if the min is too large, abort
        if (min_size > 20):
            raise Exception("min_size is too large: %d" % min_size)

        words = []
        target_word = ''
        # get a random word from the input
        if line != '':
            words = line.split()
            target_word = words[random.randint(0, len(words)-1)]

        context_id = self._get_context_id_for_target(target)

        # start with an empty chain, and work from there
        gen_words = [self.start1, self.start2]

        # walk a chain, randomly, building the list of words
        while len(gen_words) < max_size + 2 and gen_words[-1] != self.stop:
            # first, see if we have an empty response and a target word.
            # we'll just pick a word and work backwards
            if gen_words[-1] == self.start2 and target_word != '':
                working_backwards = []
                key_hits = self._retrieve_k2_for_value(target_word, context_id)
                if len(key_hits):
                    working_backwards.append(target_word)
                    self.log.debug("added '{0:s}' to working_backwards".format(target_word))
                    self.log.debug("working_backwards: {0:s}".format(" ".join(working_backwards)))
                    # generate new word
                    found_word = ''
                    target_word = words[random.randint(0, len(words)-1)]
                    # work backwards until we randomly bump into a start
                    while True:
                        key_hits = self._retrieve_k2_for_value(working_backwards[0], context_id)
                        if target_word in key_hits:
                            found_word = target_word
                            # generate new word
                            # NOTE(review): 'and False' makes this branch unreachable —
                            # looks like a deliberate kill-switch; confirm before removing
                            if len(filter(lambda a: a != target_word, words)) > 1 and False:
                                # if we have more than one target word, get a new one (otherwise give up)
                                target_word = random.choice(filter(lambda a: a != target_word, words))
                            else:
                                target_word = ''
                        else:
                            found_word = random.choice(filter(lambda a: a != self.stop, key_hits))

                        # stop when we walked back to a start marker, or got too long
                        if found_word == self.start2 or len(working_backwards) >= max_size + 2:
                            self.log.debug("done working backwards")
                            gen_words = gen_words + working_backwards
                            self.log.debug("gen_words: {0:s}".format(" ".join(gen_words)))
                            break
                        else:
                            working_backwards.insert(0, found_word)
                            self.log.debug("added '{0:s}' to working_backwards".format(found_word))
                            self.log.debug("working_backwards: {0:s}".format(" ".join(working_backwards)))

            key_hits = self._retrieve_chains_for_key(gen_words[-2], gen_words[-1], context_id)
            # use the chain that includes the target word, if it is found
            if target_word != '' and target_word in key_hits:
                gen_words.append(target_word)
                self.log.debug("added target word '{0:s}' to gen_words".format(target_word))
                self.log.debug("gen_words: {0:s}".format(" ".join(gen_words)))
                # generate new word
                target_word = words[random.randint(0, len(words)-1)]
            else:
                gen_words.append(self._get_suitable_word_from_choices(key_hits, gen_words, min_size))

            # tack a new chain onto the list and resume if we're too short
            if gen_words[-1] == self.stop and len(gen_words) < min_size + 2:
                self.log.debug("starting a new chain on end of old one")

                # chop off the end text, if it was the keyword indicating an end of chain
                if gen_words[-1] == self.stop:
                    gen_words = gen_words[:-1]

                # new word 1
                key_hits = self._retrieve_chains_for_key(self.start1, self.start2, context_id)
                gen_words.append(self._get_suitable_word_from_choices(key_hits, gen_words, min_size))

                # new word 2
                # NOTE(review): found_word may be unbound here if the
                # backwards-walk above never ran — potential NameError; and
                # keying on (start2, found_word) looks suspicious — confirm
                key_hits = self._retrieve_chains_for_key(self.start2, found_word, context_id)
                gen_words.append(self._get_suitable_word_from_choices(key_hits, gen_words, min_size))

        # chop off the seed data at the start
        gen_words = gen_words[2:]

        # chop off the end text, if it was the keyword indicating an end of chain
        if gen_words[-1] == self.stop:
            gen_words = gen_words[:-1]

        return ' '.join(gen_words)
|
|
2012-07-28 13:55:54 -05:00
|
|
|
def _get_suitable_word_from_choices(self, key_hits, gen_words, min_size):
|
|
|
|
"""Given an existing set of words, and key hits, pick one."""
|
|
|
|
|
|
|
|
if len(gen_words) < min_size + 2 and len(filter(lambda a: a != self.stop, key_hits)) > 0:
|
|
|
|
found_word = random.choice(filter(lambda a: a != self.stop, key_hits))
|
|
|
|
self.log.debug("added '{0:s}' to gen_words".format(found_word))
|
|
|
|
self.log.debug("gen_words: {0:s}".format(" ".join(gen_words)))
|
|
|
|
return found_word
|
|
|
|
elif len(key_hits) <= 0:
|
|
|
|
self.log.debug("no hits found, appending stop")
|
|
|
|
self.log.debug("gen_words: {0:s}".format(" ".join(gen_words)))
|
|
|
|
return self.stop
|
|
|
|
else:
|
|
|
|
found_word = random.choice(key_hits)
|
|
|
|
self.log.debug("added '{0:s}' to gen_words".format(found_word))
|
|
|
|
self.log.debug("gen_words: {0:s}".format(" ".join(gen_words)))
|
|
|
|
return found_word
|
|
|
|
|
2012-02-28 23:23:14 -06:00
|
|
|
def _retrieve_chains_for_key(self, k1, k2, context_id):
|
2011-02-24 20:39:32 -06:00
|
|
|
"""Get the value(s) for a given key (a pair of strings)."""
|
|
|
|
|
|
|
|
values = []
|
2012-07-27 02:18:01 -05:00
|
|
|
db = self.get_db()
|
2011-02-24 20:39:32 -06:00
|
|
|
try:
|
Markov: when looking up the start-of-sentence chain, get one random one
when finding a key for (__start1,__start2), instead of fetcihng all
(which can be a lot, in chatty channels and/or over time), get the
max ID in the table, pick a random ID between 1,max, and pick the
first id >= to it, and use that. just as random, nowhere near as
intensive.
2011-04-23 21:24:23 -05:00
|
|
|
query = ''
|
|
|
|
if k1 == self.start1 and k2 == self.start2:
|
|
|
|
# hack. get a quasi-random start from the database, in
|
|
|
|
# a faster fashion than selecting all starts
|
|
|
|
max_id = self._get_max_chain_id()
|
|
|
|
rand_id = random.randint(1,max_id)
|
2012-07-27 02:18:01 -05:00
|
|
|
query = ('SELECT v FROM markov_chain WHERE k1 = %s AND k2 = %s AND '
|
|
|
|
'(context_id = %s) AND id >= {0:d} LIMIT 1'.format(rand_id))
|
Markov: when looking up the start-of-sentence chain, get one random one
when finding a key for (__start1,__start2), instead of fetcihng all
(which can be a lot, in chatty channels and/or over time), get the
max ID in the table, pick a random ID between 1,max, and pick the
first id >= to it, and use that. just as random, nowhere near as
intensive.
2011-04-23 21:24:23 -05:00
|
|
|
else:
|
2012-07-27 02:18:01 -05:00
|
|
|
query = ('SELECT v FROM markov_chain WHERE k1 = %s AND k2 = %s AND '
|
|
|
|
'(context_id = %s)')
|
|
|
|
cur = db.cursor(mdb.cursors.DictCursor)
|
2012-07-27 16:34:57 -05:00
|
|
|
cur.execute(query, (k1, k2, context_id))
|
2012-07-27 02:18:01 -05:00
|
|
|
results = cur.fetchall()
|
2011-02-24 20:39:32 -06:00
|
|
|
|
|
|
|
for result in results:
|
2012-07-27 16:34:57 -05:00
|
|
|
values.append(result['v'])
|
2011-02-24 20:39:32 -06:00
|
|
|
|
|
|
|
return values
|
2012-07-27 02:18:01 -05:00
|
|
|
except mdb.Error as e:
|
|
|
|
self.log.error("database error in _retrieve_chains_for_key")
|
|
|
|
self.log.exception(e)
|
2011-02-24 20:39:32 -06:00
|
|
|
raise
|
2012-07-27 02:18:01 -05:00
|
|
|
finally: cur.close()
|
2011-02-24 20:39:32 -06:00
|
|
|
|
2012-02-28 23:23:14 -06:00
|
|
|
def _retrieve_k2_for_value(self, v, context_id):
|
2011-10-16 20:19:51 -05:00
|
|
|
"""Get the value(s) for a given key (a pair of strings)."""
|
|
|
|
|
|
|
|
values = []
|
2012-07-27 02:18:01 -05:00
|
|
|
db = self.get_db()
|
2011-10-16 20:19:51 -05:00
|
|
|
try:
|
2012-07-27 02:18:01 -05:00
|
|
|
query = 'SELECT k2 FROM markov_chain WHERE v = %s AND context_id = %s'
|
|
|
|
cur = db.cursor(mdb.cursors.DictCursor)
|
2012-07-27 16:34:57 -05:00
|
|
|
cur.execute(query, (v, context_id))
|
2012-07-27 02:18:01 -05:00
|
|
|
results = cur.fetchall()
|
2011-10-16 20:19:51 -05:00
|
|
|
|
|
|
|
for result in results:
|
2012-07-27 16:34:57 -05:00
|
|
|
values.append(result['k2'])
|
2011-10-16 20:19:51 -05:00
|
|
|
|
|
|
|
return values
|
2012-07-27 02:18:01 -05:00
|
|
|
except mdb.Error as e:
|
|
|
|
self.log.error("database error in _retrieve_k2_for_value")
|
|
|
|
self.log.exception(e)
|
2011-10-16 20:19:51 -05:00
|
|
|
raise
|
2012-07-27 02:18:01 -05:00
|
|
|
finally: cur.close()
|
2011-10-16 20:19:51 -05:00
|
|
|
|
2011-05-01 09:47:45 -05:00
|
|
|
def _get_chatter_targets(self):
|
|
|
|
"""Get all possible chatter targets."""
|
|
|
|
|
2012-07-27 02:18:01 -05:00
|
|
|
db = self.get_db()
|
2011-05-01 09:47:45 -05:00
|
|
|
try:
|
|
|
|
# need to create our own db object, since this is likely going to be in a new thread
|
2011-06-20 22:49:25 -05:00
|
|
|
query = 'SELECT target, chance FROM markov_chatter_target'
|
2012-07-27 02:18:01 -05:00
|
|
|
cur = db.cursor(mdb.cursors.DictCursor)
|
|
|
|
cur.execute(query)
|
|
|
|
results = cur.fetchall()
|
2011-06-20 22:49:25 -05:00
|
|
|
return results
|
2012-07-27 02:18:01 -05:00
|
|
|
except mdb.Error as e:
|
|
|
|
self.log.error("database error in _get_chatter_targets")
|
|
|
|
self.log.exception(e)
|
2011-05-01 09:47:45 -05:00
|
|
|
raise
|
2012-07-27 02:18:01 -05:00
|
|
|
finally: cur.close()
|
2011-05-01 09:47:45 -05:00
|
|
|
|
|
|
|
def _get_one_chatter_target(self):
|
|
|
|
"""Select one random chatter target."""
|
|
|
|
|
|
|
|
targets = self._get_chatter_targets()
|
|
|
|
if targets:
|
|
|
|
return targets[random.randint(0, len(targets)-1)]
|
|
|
|
|
Markov: when looking up the start-of-sentence chain, get one random one
when finding a key for (__start1,__start2), instead of fetcihng all
(which can be a lot, in chatty channels and/or over time), get the
max ID in the table, pick a random ID between 1,max, and pick the
first id >= to it, and use that. just as random, nowhere near as
intensive.
2011-04-23 21:24:23 -05:00
|
|
|
def _get_max_chain_id(self):
|
|
|
|
"""Get the highest id in the chain table."""
|
|
|
|
|
2012-07-27 02:18:01 -05:00
|
|
|
db = self.get_db()
|
Markov: when looking up the start-of-sentence chain, get one random one
when finding a key for (__start1,__start2), instead of fetcihng all
(which can be a lot, in chatty channels and/or over time), get the
max ID in the table, pick a random ID between 1,max, and pick the
first id >= to it, and use that. just as random, nowhere near as
intensive.
2011-04-23 21:24:23 -05:00
|
|
|
try:
|
|
|
|
query = '''
|
|
|
|
SELECT id FROM markov_chain ORDER BY id DESC LIMIT 1
|
|
|
|
'''
|
2012-07-27 02:18:01 -05:00
|
|
|
cur = db.cursor(mdb.cursors.DictCursor)
|
|
|
|
cur.execute(query)
|
|
|
|
result = cur.fetchone()
|
Markov: when looking up the start-of-sentence chain, get one random one
when finding a key for (__start1,__start2), instead of fetcihng all
(which can be a lot, in chatty channels and/or over time), get the
max ID in the table, pick a random ID between 1,max, and pick the
first id >= to it, and use that. just as random, nowhere near as
intensive.
2011-04-23 21:24:23 -05:00
|
|
|
if result:
|
|
|
|
return result['id']
|
|
|
|
else:
|
|
|
|
return None
|
2012-07-27 02:18:01 -05:00
|
|
|
except mdb.Error as e:
|
|
|
|
self.log.error("database error in _get_max_chain_id")
|
|
|
|
self.log.exception(e)
|
2012-02-28 23:23:14 -06:00
|
|
|
raise
|
2012-07-27 02:18:01 -05:00
|
|
|
finally: cur.close()
|
2012-02-28 23:23:14 -06:00
|
|
|
|
|
|
|
def _get_context_id_for_target(self, target):
|
|
|
|
|
|
|
|
"""Get the context ID for the desired/input target."""
|
|
|
|
|
2012-07-27 02:18:01 -05:00
|
|
|
db = self.get_db()
|
2012-02-28 23:23:14 -06:00
|
|
|
try:
|
|
|
|
query = '''
|
|
|
|
SELECT mc.id FROM markov_context mc
|
|
|
|
INNER JOIN markov_target_to_context_map mt
|
|
|
|
ON mt.context_id = mc.id
|
2012-07-27 02:18:01 -05:00
|
|
|
WHERE mt.target = %s
|
2012-02-28 23:23:14 -06:00
|
|
|
'''
|
2012-07-27 02:18:01 -05:00
|
|
|
cur = db.cursor(mdb.cursors.DictCursor)
|
|
|
|
cur.execute(query, (target,))
|
|
|
|
result = cur.fetchone()
|
2012-02-28 23:23:14 -06:00
|
|
|
db.close()
|
|
|
|
if result:
|
|
|
|
return result['id']
|
|
|
|
else:
|
2012-03-19 00:12:29 -05:00
|
|
|
# auto-generate a context to keep things private
|
|
|
|
self._add_context_for_target(target)
|
|
|
|
return self._get_context_id_for_target(target)
|
2012-07-27 02:18:01 -05:00
|
|
|
except mdb.Error as e:
|
|
|
|
self.log.error("database error in _get_context_id_for_target")
|
|
|
|
self.log.exception(e)
|
2012-03-19 00:12:29 -05:00
|
|
|
raise
|
2012-07-27 02:18:01 -05:00
|
|
|
finally: cur.close()
|
2012-03-19 00:12:29 -05:00
|
|
|
|
|
|
|
def _add_context_for_target(self, target):
|
|
|
|
|
|
|
|
"""Create a new context for the desired/input target."""
|
|
|
|
|
2012-07-27 02:18:01 -05:00
|
|
|
db = self.get_db()
|
2012-03-19 00:12:29 -05:00
|
|
|
try:
|
2012-07-27 02:18:01 -05:00
|
|
|
statement = 'INSERT INTO markov_context (context) VALUES (%s)'
|
|
|
|
cur = db.cursor(mdb.cursors.DictCursor)
|
2012-03-19 00:12:29 -05:00
|
|
|
cur.execute(statement, (target,))
|
|
|
|
statement = '''
|
|
|
|
INSERT INTO markov_target_to_context_map (target, context_id)
|
2012-07-27 02:18:01 -05:00
|
|
|
VALUES (%s, (SELECT id FROM markov_context WHERE context = %s))
|
2012-03-19 00:12:29 -05:00
|
|
|
'''
|
|
|
|
cur.execute(statement, (target,target))
|
|
|
|
db.commit()
|
2012-07-27 02:18:01 -05:00
|
|
|
except mdb.Error as e:
|
2012-03-19 00:12:29 -05:00
|
|
|
db.rollback()
|
2012-07-27 02:18:01 -05:00
|
|
|
self.log.error("database error in _add_context_for_target")
|
|
|
|
self.log.exception(e)
|
2012-03-19 00:12:29 -05:00
|
|
|
raise
|
2012-07-27 02:18:01 -05:00
|
|
|
finally: cur.close()
|
|
|
|
|
2012-03-19 00:12:29 -05:00
|
|
|
try:
|
|
|
|
query = '''
|
|
|
|
SELECT mc.id FROM markov_context mc
|
|
|
|
INNER JOIN markov_target_to_context_map mt
|
|
|
|
ON mt.context_id = mc.id
|
2012-07-27 02:18:01 -05:00
|
|
|
WHERE mt.target = %s
|
2012-03-19 00:12:29 -05:00
|
|
|
'''
|
2012-07-27 02:18:01 -05:00
|
|
|
cur = db.cursor(mdb.cursors.DictCursor)
|
|
|
|
cur.execute(query, (target,))
|
|
|
|
result = cur.fetchone()
|
2012-03-19 00:12:29 -05:00
|
|
|
if result:
|
|
|
|
return result['id']
|
|
|
|
else:
|
|
|
|
# auto-generate a context to keep things private
|
|
|
|
self._add_context_for_target(target)
|
|
|
|
return self._get_context_id_for_target(target)
|
2012-07-27 02:18:01 -05:00
|
|
|
except mdb.Error as e:
|
|
|
|
self.log.error("database error in _get_context_id_for_target")
|
|
|
|
self.log.exception(e)
|
Markov: when looking up the start-of-sentence chain, get one random one
when finding a key for (__start1,__start2), instead of fetcihng all
(which can be a lot, in chatty channels and/or over time), get the
max ID in the table, pick a random ID between 1,max, and pick the
first id >= to it, and use that. just as random, nowhere near as
intensive.
2011-04-23 21:24:23 -05:00
|
|
|
raise
|
2012-07-27 02:18:01 -05:00
|
|
|
finally: cur.close()
|
Markov: when looking up the start-of-sentence chain, get one random one
when finding a key for (__start1,__start2), instead of fetcihng all
(which can be a lot, in chatty channels and/or over time), get the
max ID in the table, pick a random ID between 1,max, and pick the
first id >= to it, and use that. just as random, nowhere near as
intensive.
2011-04-23 21:24:23 -05:00
|
|
|
|
2011-01-18 22:30:59 -06:00
|
|
|
# vi:tabstop=4:expandtab:autoindent
|
|
|
|
# kate: indent-mode python;indent-width 4;replace-tabs on;
|