# hearts.py
# Keepin' score is better with <3.
# Eryn Wells <eryn@erynwells.me>

import json
import logging
import os.path
import random
import re

from service import slack

HEARTS_FILE = 'hearts.json'

PLUSES = ['++', '<3', '&lt;3', ':heart:', ':yellow_heart:', ':green_heart:', ':blue_heart:', ':purple_heart:', '❤️', '💛', '💚', '💙', '💜']
MINUSES = ['--', '–', '—', '</3', '&lt;/3', ':broken_heart:', '💔']
SASS = ['r u srs rn', 'no', 'noooope']

LOGGER = logging.getLogger('hearts')

LEADERS_RE = re.compile(r'!(top|bottom)(\d+)')
WHITESPACE_RE = re.compile(r'\s+')
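# LINK_RE matches Slack-style links such as <@U12345|eryn>, <#C12345|general>,
# and <!everyone>; the |name portion is optional. (The IDs here are just
# illustrative examples.)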
LINK_RE = re.compile(r'<(?P<type>[@#!])(?P<id>\w+)(\|(?P<name>\w+))?>')
EMOJI_RE = re.compile(r':\w+:')


# rtmbot interface
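# This module assumes the rtmbot plugin convention: responses are queued by
# appending [channel, text] pairs to the module-level outputs list, and events
# are handled by process_<event type> functions below.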
outputs = []

USERS = []
CHANNELS = []


#
# RTM
#

def process_hello(data):
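    '''Cache the Slack user and channel lists when the RTM connection opens.'''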
    global USERS, CHANNELS
    USERS = slack.users()
    CHANNELS = slack.channels()


def process_message(data):
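    '''Handle a chat message: answer !topN/!bottomN leaderboard requests and
    !erase commands, and apply any heart operators to the named thing's score.'''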
    try:
        text = data['text'].strip()
    except KeyError:
        # TODO: Make this better.
        return

    leaders_m = LEADERS_RE.match(text)
    if leaders_m:
        top = leaders_m.group(1) == 'top'
        try:
            n = int(leaders_m.group(2))
        except ValueError:
            outputs.append([data['channel'], random.choice(SASS)])
            return
        if n == 0:
            outputs.append([data['channel'], random.choice(SASS)])
            return
        scores = leaders(n, top)
        if scores:
            outputs.append([data['channel'], scores])
        return

    if text.startswith('!erase'):
        name = text[len('!erase'):].strip()
        success = erase_score(name)
        if success:
            outputs.append([data['channel'], "Erased score for _{}_.".format(name)])
        else:
            outputs.append([data['channel'], "No score for _{}_.".format(name)])
        return

    score, name = calculate_score_and_find_operators(text)
    if score is not None and name:
        LOGGER.info('Adding %s to %s', score, name)
        if score:
            score = update_item(name, score)
            outputs.append([data['channel'], '_{}_ now has a score of {}.'.format(name, score)])
        else:
            outputs.append([data['channel'], 'No score change for _{}_.'.format(name)])


#
# Hearts
#

def calculate_score_and_find_operators(text):
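    '''Strip heart (+1) and broken-heart (-1) operators off the ends of text and
    tally them up. Returns (score, remaining_name), or (None, None) when there
    is nothing to score.'''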
    original_text = text
    score = 0

    while True:
        found = False

        op, is_prefix = has_operator(text, PLUSES)
        if op:
            text = strip_operator(text, op, is_prefix)
            score += 1
            found = True

        op, is_prefix = has_operator(text, MINUSES)
        if op:
            text = strip_operator(text, op, is_prefix)
            score -= 1
            found = True

        if not found:
            break

    did_change = original_text != text
    if did_change:
        tokenized = WHITESPACE_RE.split(text)

        # If the remaining string is all emojis, ignore it
        emoji_matches = map(EMOJI_RE.match, tokenized)
        all_emoji = all(emoji_matches)
        if all_emoji:
            LOGGER.debug('Message is all emoji, skipping')
            return None, None

        tokenized = map(strip_colon, tokenized)
        tokenized = map(swap_links_and_vars, tokenized)

        text = ' '.join(tokenized)
        LOGGER.debug('Score {} for message: {}'.format(score, text))
        return score, text
    else:
        LOGGER.debug('No score adjustment for message: {}'.format(text))
        return None, None


def strip_operator(text, operator, is_prefix):
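    '''Remove operator (and adjacent whitespace) from the start of text when
    is_prefix is true, otherwise from the end.'''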
    len_op = len(operator)
    if is_prefix:
        return text[len_op:].lstrip()
    else:
        return text[:-len_op].rstrip()


def has_operator(text, operators):
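    '''Return (op, is_prefix) for the first operator that text starts or ends
    with, or (None, None) if there is no match.'''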
    for op in operators:
        if text.startswith(op):
            return op, True
        elif text.endswith(op):
            return op, False
    return None, None


def strip_colon(item):
    '''Remove trailing colon from messages that @ a particular user.'''
    m = LINK_RE.match(item)
    if not m:
        return item
    if not (m.end() == (len(item) - 1) and item.endswith(':')):
        return item
    return item[:-1]


def swap_links_and_vars(item):
    '''
    Swap links and variables for their names. This is for things like @eryn,
    #general, and !everyone.
    '''
    m = LINK_RE.match(item)
    if not m:
        return item

    link_type = m.group('type')

    # Users
    if link_type == '@':
        name = m.group('name')
        if name:
            return name
        ident = m.group('id')
        users = [u for u in USERS if u['id'] == ident]
        try:
            return users[0]['name']
        except IndexError:
            return item

    # Channels
    elif link_type == '#':
        name = m.group('name')
        if name:
            return name
        ident = m.group('id')
        channels = [c for c in CHANNELS if c['id'] == ident]
        try:
            return channels[0]['name']
        except IndexError:
            return item

    # Variables (e.g. everyone, channel, here, etc.)
    elif link_type == '!':
        name = m.group('name')
        return name if name else m.group('id')

    return item


def leaders(n, top=True):
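    '''Format the top (or bottom) n scores as a numbered list, one per line.
    Returns None when n is 0.'''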
    if n == 0:
        return

    data = read_data()
    items = [(score, name) for name, score in data.items()]
    items.sort(key=lambda item: item[0], reverse=top)
    out = ''

    for idx in range(n):
        try:
            item = items[idx]
            rank = idx + 1 if top else len(items) - idx
            out += '{}. _{}_ : {}\n'.format(rank, item[1], item[0])
        except IndexError:
            break

    return out


#
# Persistence
#

def erase_score(name):
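    '''Remove name's score from the data file. Returns True if a score was
    erased, False if name had none.'''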
    data = read_data()
    try:
        del data[name]
    except KeyError:
        return False
    else:
        write_data(data)
        return True


def read_data():
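    '''Load the scores dict from HEARTS_FILE, or return an empty dict if the
    file doesn't exist yet.'''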
    if not os.path.exists(HEARTS_FILE):
        return {}
    with open(HEARTS_FILE) as f:
        return json.load(f)


def write_data(obj):
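    '''Write the scores dict to HEARTS_FILE as indented, key-sorted JSON.'''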
    with open(HEARTS_FILE, 'w') as f:
        json.dump(obj, f, sort_keys=True, indent=4)


def update_item(name, increment):
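    '''Add increment to name's score (starting from 0 if it has none), save the
    data file, and return the new score.'''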
    data = read_data()
    score = data.get(name)
    if not score:
        score = 0
    score += increment
    data[name] = score
    write_data(data)
    return score