twitter-ebooks/lib/twitter_ebooks/model.rb

#!/usr/bin/env ruby
# encoding: utf-8

require 'json'
require 'set'
require 'digest/md5'

module Ebooks
  class Model
    attr_accessor :hash, :sentences, :mentions, :keywords

    def self.consume(txtpath)
      Model.new.consume(txtpath)
    end

    def self.load(path)
      Marshal.load(File.open(path, 'rb') { |f| f.read })
    end
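
    # Usage sketch (paths are hypothetical):
    #   model = Ebooks::Model.consume("corpus/example.json")
    #   model.save("model/example.model")
    #   model = Ebooks::Model.load("model/example.model")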

    def consume(path)
      content = File.read(path)
      @hash = Digest::MD5.hexdigest(content)

      # A .json corpus is expected to be an array of tweet objects with a
      # :text field; any other extension is treated as one tweet per line.
      if path.split('.')[-1] == "json"
        log "Reading json corpus from #{path}"
        lines = JSON.parse(content, symbolize_names: true).map do |tweet|
          tweet[:text]
        end
      else
        log "Reading plaintext corpus from #{path}"
        lines = content.split("\n")
      end

      log "Removing commented lines and sorting mentions"

      keeping = []
      mentions = []
      lines.each do |l|
        next if l.start_with?('#') # Remove commented lines
        next if l.include?('RT') || l.include?('MT') # Remove soft retweets (crude substring match)

        if l.include?('@')
          mentions << l
        else
          keeping << l
        end
      end

      text = NLP.normalize(keeping.join("\n")) # Normalize weird characters
      mention_text = NLP.normalize(mentions.join("\n"))

      log "Segmenting text into sentences"

      statements = NLP.sentences(text)
      mentions = NLP.sentences(mention_text)

      log "Tokenizing #{statements.length} statements and #{mentions.length} mentions"
      @sentences = []
      @mentions = []

      statements.each do |s|
        @sentences << NLP.tokenize(s).reject do |t|
          t.include?('@') || t.include?('http')
        end
      end

      mentions.each do |s|
        @mentions << NLP.tokenize(s).reject do |t|
          t.include?('@') || t.include?('http')
        end
      end
log "Ranking keywords"
@keywords = NLP.keywords(@sentences)
self
end
def save(path)
2014-01-28 16:36:23 -08:00
File.open(path, 'wb') do |f|
2013-11-08 06:02:05 +11:00
f.write(Marshal.dump(self))
end
self
end
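
    # Note: Marshal ties the saved file to this class's layout, so models
    # saved by one version of the code may fail to load under another.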

    def fix(tweet)
      # This seems to require an external api call
      #begin
      #  fixer = NLP.gingerice.parse(tweet)
      #  log fixer if fixer['corrections']
      #  tweet = fixer['result']
      #rescue Exception => e
      #  log e.message
      #  log e.backtrace
      #end

      NLP.htmlentities.decode tweet
    end

    def valid_tweet?(tokens, limit)
      tweet = NLP.reconstruct(tokens)
      tweet.length <= limit && !NLP.unmatched_enclosers?(tweet)
    end
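
    # Generates a tweet of at most +limit+ characters: tries bigram-based
    # generation first, then falls back to unigrams if the bigram result
    # reproduces a corpus sentence verbatim.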
    def make_statement(limit=140, generator=nil, retry_limit=10)
      responding = !generator.nil?
      generator ||= SuffixGenerator.build(@sentences)

      retries = 0
      tweet = ""

      while (tokens = generator.generate(3, :bigrams)) do
        next if tokens.length <= 3 && !responding
        break if valid_tweet?(tokens, limit)

        retries += 1
        break if retries >= retry_limit
      end

      if verbatim?(tokens) && tokens.length > 3 # We made a verbatim tweet by accident
        while (tokens = generator.generate(3, :unigrams)) do
          break if valid_tweet?(tokens, limit) && !verbatim?(tokens)

          retries += 1
          break if retries >= retry_limit
        end
      end

      tweet = NLP.reconstruct(tokens)

      if retries >= retry_limit
        log "Unable to produce valid non-verbatim tweet; using \"#{tweet}\""
      end

      fix tweet
    end

    # Test if a sentence has been copied verbatim from the original corpus
    def verbatim?(tokens)
      @sentences.include?(tokens) || @mentions.include?(tokens)
    end

    # Finds all tokenized sentences relevant to the given input by
    # comparing non-stopword token overlaps. Returns [relevant,
    # slightly_relevant]: the first shares a non-stopword token with
    # the input, the second shares any token at all.
    def find_relevant(sentences, input)
      relevant = []
      slightly_relevant = []

      tokenized = NLP.tokenize(input).map(&:downcase)

      sentences.each do |sent|
        tokenized.each do |token|
          if sent.map(&:downcase).include?(token)
            relevant << sent unless NLP.stopword?(token)
            slightly_relevant << sent
          end
        end
      end

      [relevant, slightly_relevant]
    end

    # Generates a response by looking for related sentences
    # in the corpus and building a smaller generator from these
    def make_response(input, limit=140, sentences=@mentions)
      # Prefer mentions
      relevant, slightly_relevant = find_relevant(sentences, input)

      if relevant.length >= 3
        generator = SuffixGenerator.build(relevant)
        make_statement(limit, generator)
      elsif slightly_relevant.length >= 5
        generator = SuffixGenerator.build(slightly_relevant)
        make_statement(limit, generator)
      elsif sentences.equal?(@mentions)
        make_response(input, limit, @sentences)
      else
        make_statement(limit)
      end
    end
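
    # Usage sketch (input string is hypothetical):
    #   model.make_response("@bot how are you today?", 140)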
  end
end