From 92c31cd0ef3a030db47c93f3ddad613124df5c12 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 12:25:36 -0500
Subject: [PATCH 01/27] Fixed problems with 405: method not allowed for links
---
.gitignore | 1 +
modules/head.py | 19 ++++++++++++-------
2 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/.gitignore b/.gitignore
index b6aaca6bb..be18f9e96 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@
*~
*.swp
__pycache__
+desktop.ini
diff --git a/modules/head.py b/modules/head.py
index 3835522e4..6b6ba1cf1 100644
--- a/modules/head.py
+++ b/modules/head.py
@@ -10,6 +10,7 @@
import re
import urllib.parse
import time
+import requests
from html.entities import name2codepoint
import web
from tools import deprecated
@@ -124,13 +125,17 @@ def gettitle(phenny, uri):
try:
redirects = 0
while True:
- info = web.head(uri)
-
- if not isinstance(info, list):
- status = '200'
- else:
- status = str(info[1])
- info = info[0]
+ try:
+ info = web.head(uri)
+
+ if not isinstance(info, list):
+ status = '200'
+ else:
+ status = str(info[1])
+ info = info[0]
+ except web.HTTPError:
+ info = requests.get(uri, headers=web.default_headers, verify=True)
+ status = str(info.status_code)
if status.startswith('3'):
uri = urllib.parse.urljoin(uri, info['Location'])
else:
From 60cb026f70d0bf895c247266cf69050dc09d21ff Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 13:00:55 -0500
Subject: [PATCH 02/27] Added apertium translate plugin
---
modules/apertium_translate.py | 190 ++++++++++++++++++++++++++++++++++
1 file changed, 190 insertions(+)
create mode 100644 modules/apertium_translate.py
diff --git a/modules/apertium_translate.py b/modules/apertium_translate.py
new file mode 100644
index 000000000..a11207131
--- /dev/null
+++ b/modules/apertium_translate.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+# coding=utf-8
+"""
+apertium_translate.py - Phenny Translation Module
+"""
+
+import re, urllib.request, json
+import web
+from tools import GrumbleError
+
+headers = [(
+ 'User-Agent', 'Mozilla/5.0' +
+ '(X11; U; Linux i686)' +
+ 'Gecko/20071127 Firefox/2.0.0.11'
+)]
+
+APIerrorData = 'Sorry, the apertium API did not return any data ☹'
+APIerrorHttp = 'Sorry, the apertium API gave HTTP error %s: %s ☹'
+
+def translate(translate_me, input_lang, output_lang='en'):
+ opener = urllib.request.build_opener()
+ opener.addheaders = headers
+
+ input_lang, output_lang = web.quote(input_lang), web.quote(output_lang)
+ translate_me = web.quote(translate_me)
+
+ response = opener.open('http://api.apertium.org/json/translate?q='+translate_me+'&langpair='+input_lang+"|"+output_lang).read()
+
+ responseArray = json.loads(response.decode('utf-8'))
+ if int(responseArray['responseStatus']) != 200:
+ raise GrumbleError(APIerrorHttp % (responseArray['responseStatus'], responseArray['responseDetails']))
+ if responseArray['responseData']['translatedText'] == []:
+ raise GrumbleError(APIerrorData)
+
+ translated_text = responseArray['responseData']['translatedText']
+ return translated_text
+
+
+def apertium_translate(phenny, input):
+ """Translates a phrase using the apertium API"""
+ line = input.group(2)
+ if not line:
+ raise GrumbleError("Need something to translate!")
+ #line = line.encode('utf-8')
+
+ pairs = []
+ guidelines = line.split('|')
+ if len(guidelines) > 1:
+ for guideline in guidelines[1:]:
+ #phenny.say(guideline)
+ pairs.append(guideline.strip().split('-'))
+ guidelines = guidelines[0]
+ #phenny.say(str(guidelines))
+ stuff = re.search('(.*) ([a-z]+-[a-z]+)', guidelines)
+ #phenny.say(str(stuff.groups()))
+ pairs.insert(0, stuff.group(2).split('-'))
+ translate_me = stuff.group(1)
+ #phenny.say(str(pairs))
+
+ #output_lang = line.split(' ')[-1]
+ #input_lang = line.split(' ')[-2]
+ #translate_me = ' '.join(line.split(' ')[:-2])
+
+ if (len(translate_me) > 350) and (not input.admin):
+ raise GrumbleError('Phrase must be under 350 characters.')
+
+ msg = translate_me
+ finalmsg = False
+ translated = ""
+ for (input_lang, output_lang) in pairs:
+ if input_lang == output_lang:
+ raise GrumbleError('Stop trying to confuse me! Pick different languages ;)')
+ msg = translate(msg, input_lang, output_lang)
+ if not msg:
+ raise GrumbleError('The %s to %s translation failed, sorry!' % (input_lang, output_lang))
+ msg = web.decode(msg) # msg.replace(''', "'")
+ this_translated = "(%s-%s) %s" % (input_lang, output_lang, msg)
+ translated = msg
+
+ #if not finalmsg:
+ # finalmsg = translated
+ #phenny.reply(finalmsg)
+ phenny.reply(translated)
+
+def apertium_listlangs(phenny, input):
+ """Lists languages available for translation from/to"""
+
+ opener = urllib.request.build_opener()
+ opener.addheaders = headers
+
+ response = opener.open('http://api.apertium.org/json/listPairs').read()
+
+ langs = json.loads(response.decode('utf-8'))
+ if int(langs['responseStatus']) != 200:
+ raise GrumbleError(APIerrorHttp % (langs['responseStatus'], langs['responseDetails']))
+ if langs['responseData'] == []:
+ raise GrumbleError(APIerrorData)
+
+ outlangs = []
+ #phenny.say(str(langs))
+ for pair in langs['responseData']:
+ if pair['sourceLanguage'] not in outlangs:
+ outlangs.append(pair['sourceLanguage'])
+ if pair['targetLanguage'] not in outlangs:
+ outlangs.append(pair['targetLanguage'])
+ #phenny.say(str(outlangs))
+
+ extra = "; more info: .listpairs lg"
+
+ first=True
+ allLangs = ""
+ for lang in outlangs:
+ if not first:
+ allLangs+=", "
+ else:
+ first=False
+ allLangs += lang
+ phenny.say(allLangs + extra)
+
+
+def apertium_listpairs(phenny, input):
+ """Lists translation pairs available to apertium translation"""
+ lang = input.group(2)
+
+ opener = urllib.request.build_opener()
+ opener.addheaders = headers
+
+ response = opener.open('http://api.apertium.org/json/listPairs').read()
+
+ langs = json.loads(response.decode('utf-8'))
+
+ langs = json.loads(response.decode('utf-8'))
+ if langs['responseData'] is []:
+ raise GrumbleError(APIerrorData)
+ if int(langs['responseStatus']) != 200:
+ raise GrumbleError(APIerrorHttp % (langs['responseStatus'], langs['responseDetails']))
+
+ if not lang:
+ allpairs=""
+ first=True
+ for pair in langs['responseData']:
+ if not first:
+ allpairs+=","
+ else:
+ first=False
+ allpairs+="%s→%s" % (pair['sourceLanguage'], pair['targetLanguage'])
+ phenny.say(allpairs)
+ else:
+ toLang = []
+ fromLang = []
+ for pair in langs['responseData']:
+ if pair['sourceLanguage'] == lang:
+ fromLang.append(pair['targetLanguage'])
+ if pair['targetLanguage'] == lang:
+ toLang.append(pair['sourceLanguage'])
+ first=True
+ froms = ""
+ for lg in fromLang:
+ if not first:
+ froms += ", "
+ else:
+ first = False
+ froms += lg
+ first = True
+ tos = ""
+ for lg in toLang:
+ if not first:
+ tos += ", "
+ else:
+ first = False
+ tos += lg
+ #finals = froms + (" → %s → " % lang) + tos
+ finals = tos + (" → %s → " % lang) + froms
+
+ phenny.say(finals)
+
+apertium_listpairs.name = 'listpairs'
+apertium_listpairs.commands = ['listpairs']
+apertium_listpairs.example = '.listpairs ca'
+apertium_listpairs.priority = 'low'
+
+apertium_listlangs.name = 'listlangs'
+apertium_listlangs.commands = ['listlangs']
+apertium_listlangs.example = '.listlangs'
+apertium_listlangs.priority = 'low'
+
+apertium_translate.name = 't'
+apertium_translate.commands = ['t']
+apertium_translate.example = '.t I like pie en-es'
+apertium_translate.priority = 'high'
\ No newline at end of file
From 14d11c03c3d93ac6b0ecfafcb35de1fed6d3e4ec Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 14:09:01 -0500
Subject: [PATCH 03/27] Get title 405 fix take 2
---
modules/head.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/head.py b/modules/head.py
index 6b6ba1cf1..9050379d6 100644
--- a/modules/head.py
+++ b/modules/head.py
@@ -136,6 +136,7 @@ def gettitle(phenny, uri):
except web.HTTPError:
info = requests.get(uri, headers=web.default_headers, verify=True)
status = str(info.status_code)
+ info = info.headers
if status.startswith('3'):
uri = urllib.parse.urljoin(uri, info['Location'])
else:
From fd1f70e3f1afebefcf3e4b5b7605b34ab682f784 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 19:53:53 -0500
Subject: [PATCH 04/27] Get title 404 fix
---
modules/head.py | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/modules/head.py b/modules/head.py
index 9050379d6..258d932ec 100644
--- a/modules/head.py
+++ b/modules/head.py
@@ -134,9 +134,13 @@ def gettitle(phenny, uri):
status = str(info[1])
info = info[0]
except web.HTTPError:
- info = requests.get(uri, headers=web.default_headers, verify=True)
- status = str(info.status_code)
- info = info.headers
+ try:
+ info = requests.get(uri, headers=web.default_headers, verify=True)
+ status = str(info.status_code)
+ info = info.headers
+ except web.HTTPError:
+ return None
+
if status.startswith('3'):
uri = urllib.parse.urljoin(uri, info['Location'])
else:
From 490ad8262bcf08faf7c9ee5871b3a292d7f4f84a Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 19:59:33 -0500
Subject: [PATCH 05/27] fixed apertium_translate's spacing
---
modules/apertium_translate.py | 292 +++++++++++++++++-----------------
1 file changed, 146 insertions(+), 146 deletions(-)
diff --git a/modules/apertium_translate.py b/modules/apertium_translate.py
index a11207131..88ee8990d 100644
--- a/modules/apertium_translate.py
+++ b/modules/apertium_translate.py
@@ -9,170 +9,170 @@
from tools import GrumbleError
headers = [(
- 'User-Agent', 'Mozilla/5.0' +
- '(X11; U; Linux i686)' +
- 'Gecko/20071127 Firefox/2.0.0.11'
+ 'User-Agent', 'Mozilla/5.0' +
+ '(X11; U; Linux i686)' +
+ 'Gecko/20071127 Firefox/2.0.0.11'
)]
APIerrorData = 'Sorry, the apertium API did not return any data ☹'
APIerrorHttp = 'Sorry, the apertium API gave HTTP error %s: %s ☹'
def translate(translate_me, input_lang, output_lang='en'):
- opener = urllib.request.build_opener()
- opener.addheaders = headers
+ opener = urllib.request.build_opener()
+ opener.addheaders = headers
- input_lang, output_lang = web.quote(input_lang), web.quote(output_lang)
- translate_me = web.quote(translate_me)
+ input_lang, output_lang = web.quote(input_lang), web.quote(output_lang)
+ translate_me = web.quote(translate_me)
- response = opener.open('http://api.apertium.org/json/translate?q='+translate_me+'&langpair='+input_lang+"|"+output_lang).read()
+ response = opener.open('http://api.apertium.org/json/translate?q='+translate_me+'&langpair='+input_lang+"|"+output_lang).read()
- responseArray = json.loads(response.decode('utf-8'))
- if int(responseArray['responseStatus']) != 200:
- raise GrumbleError(APIerrorHttp % (responseArray['responseStatus'], responseArray['responseDetails']))
- if responseArray['responseData']['translatedText'] == []:
- raise GrumbleError(APIerrorData)
+ responseArray = json.loads(response.decode('utf-8'))
+ if int(responseArray['responseStatus']) != 200:
+ raise GrumbleError(APIerrorHttp % (responseArray['responseStatus'], responseArray['responseDetails']))
+ if responseArray['responseData']['translatedText'] == []:
+ raise GrumbleError(APIerrorData)
- translated_text = responseArray['responseData']['translatedText']
- return translated_text
+ translated_text = responseArray['responseData']['translatedText']
+ return translated_text
def apertium_translate(phenny, input):
- """Translates a phrase using the apertium API"""
- line = input.group(2)
- if not line:
- raise GrumbleError("Need something to translate!")
- #line = line.encode('utf-8')
-
- pairs = []
- guidelines = line.split('|')
- if len(guidelines) > 1:
- for guideline in guidelines[1:]:
- #phenny.say(guideline)
- pairs.append(guideline.strip().split('-'))
- guidelines = guidelines[0]
- #phenny.say(str(guidelines))
- stuff = re.search('(.*) ([a-z]+-[a-z]+)', guidelines)
- #phenny.say(str(stuff.groups()))
- pairs.insert(0, stuff.group(2).split('-'))
- translate_me = stuff.group(1)
- #phenny.say(str(pairs))
-
- #output_lang = line.split(' ')[-1]
- #input_lang = line.split(' ')[-2]
- #translate_me = ' '.join(line.split(' ')[:-2])
-
- if (len(translate_me) > 350) and (not input.admin):
- raise GrumbleError('Phrase must be under 350 characters.')
-
- msg = translate_me
- finalmsg = False
- translated = ""
- for (input_lang, output_lang) in pairs:
- if input_lang == output_lang:
- raise GrumbleError('Stop trying to confuse me! Pick different languages ;)')
- msg = translate(msg, input_lang, output_lang)
- if not msg:
- raise GrumbleError('The %s to %s translation failed, sorry!' % (input_lang, output_lang))
- msg = web.decode(msg) # msg.replace(''', "'")
- this_translated = "(%s-%s) %s" % (input_lang, output_lang, msg)
- translated = msg
-
- #if not finalmsg:
- # finalmsg = translated
- #phenny.reply(finalmsg)
- phenny.reply(translated)
+ """Translates a phrase using the apertium API"""
+ line = input.group(2)
+ if not line:
+ raise GrumbleError("Need something to translate!")
+ #line = line.encode('utf-8')
+
+ pairs = []
+ guidelines = line.split('|')
+ if len(guidelines) > 1:
+ for guideline in guidelines[1:]:
+ #phenny.say(guideline)
+ pairs.append(guideline.strip().split('-'))
+ guidelines = guidelines[0]
+ #phenny.say(str(guidelines))
+ stuff = re.search('(.*) ([a-z]+-[a-z]+)', guidelines)
+ #phenny.say(str(stuff.groups()))
+ pairs.insert(0, stuff.group(2).split('-'))
+ translate_me = stuff.group(1)
+ #phenny.say(str(pairs))
+
+ #output_lang = line.split(' ')[-1]
+ #input_lang = line.split(' ')[-2]
+ #translate_me = ' '.join(line.split(' ')[:-2])
+
+ if (len(translate_me) > 350) and (not input.admin):
+ raise GrumbleError('Phrase must be under 350 characters.')
+
+ msg = translate_me
+ finalmsg = False
+ translated = ""
+ for (input_lang, output_lang) in pairs:
+ if input_lang == output_lang:
+ raise GrumbleError('Stop trying to confuse me! Pick different languages ;)')
+ msg = translate(msg, input_lang, output_lang)
+ if not msg:
+ raise GrumbleError('The %s to %s translation failed, sorry!' % (input_lang, output_lang))
+ msg = web.decode(msg) # msg.replace(''', "'")
+ this_translated = "(%s-%s) %s" % (input_lang, output_lang, msg)
+ translated = msg
+
+ #if not finalmsg:
+ # finalmsg = translated
+ #phenny.reply(finalmsg)
+ phenny.reply(translated)
def apertium_listlangs(phenny, input):
- """Lists languages available for translation from/to"""
-
- opener = urllib.request.build_opener()
- opener.addheaders = headers
-
- response = opener.open('http://api.apertium.org/json/listPairs').read()
-
- langs = json.loads(response.decode('utf-8'))
- if int(langs['responseStatus']) != 200:
- raise GrumbleError(APIerrorHttp % (langs['responseStatus'], langs['responseDetails']))
- if langs['responseData'] == []:
- raise GrumbleError(APIerrorData)
-
- outlangs = []
- #phenny.say(str(langs))
- for pair in langs['responseData']:
- if pair['sourceLanguage'] not in outlangs:
- outlangs.append(pair['sourceLanguage'])
- if pair['targetLanguage'] not in outlangs:
- outlangs.append(pair['targetLanguage'])
- #phenny.say(str(outlangs))
-
- extra = "; more info: .listpairs lg"
-
- first=True
- allLangs = ""
- for lang in outlangs:
- if not first:
- allLangs+=", "
- else:
- first=False
- allLangs += lang
- phenny.say(allLangs + extra)
+ """Lists languages available for translation from/to"""
+
+ opener = urllib.request.build_opener()
+ opener.addheaders = headers
+
+ response = opener.open('http://api.apertium.org/json/listPairs').read()
+
+ langs = json.loads(response.decode('utf-8'))
+ if int(langs['responseStatus']) != 200:
+ raise GrumbleError(APIerrorHttp % (langs['responseStatus'], langs['responseDetails']))
+ if langs['responseData'] == []:
+ raise GrumbleError(APIerrorData)
+
+ outlangs = []
+ #phenny.say(str(langs))
+ for pair in langs['responseData']:
+ if pair['sourceLanguage'] not in outlangs:
+ outlangs.append(pair['sourceLanguage'])
+ if pair['targetLanguage'] not in outlangs:
+ outlangs.append(pair['targetLanguage'])
+ #phenny.say(str(outlangs))
+
+ extra = "; more info: .listpairs lg"
+
+ first=True
+ allLangs = ""
+ for lang in outlangs:
+ if not first:
+ allLangs+=", "
+ else:
+ first=False
+ allLangs += lang
+ phenny.say(allLangs + extra)
def apertium_listpairs(phenny, input):
- """Lists translation pairs available to apertium translation"""
- lang = input.group(2)
-
- opener = urllib.request.build_opener()
- opener.addheaders = headers
-
- response = opener.open('http://api.apertium.org/json/listPairs').read()
-
- langs = json.loads(response.decode('utf-8'))
-
- langs = json.loads(response.decode('utf-8'))
- if langs['responseData'] is []:
- raise GrumbleError(APIerrorData)
- if int(langs['responseStatus']) != 200:
- raise GrumbleError(APIerrorHttp % (langs['responseStatus'], langs['responseDetails']))
-
- if not lang:
- allpairs=""
- first=True
- for pair in langs['responseData']:
- if not first:
- allpairs+=","
- else:
- first=False
- allpairs+="%s→%s" % (pair['sourceLanguage'], pair['targetLanguage'])
- phenny.say(allpairs)
- else:
- toLang = []
- fromLang = []
- for pair in langs['responseData']:
- if pair['sourceLanguage'] == lang:
- fromLang.append(pair['targetLanguage'])
- if pair['targetLanguage'] == lang:
- toLang.append(pair['sourceLanguage'])
- first=True
- froms = ""
- for lg in fromLang:
- if not first:
- froms += ", "
- else:
- first = False
- froms += lg
- first = True
- tos = ""
- for lg in toLang:
- if not first:
- tos += ", "
- else:
- first = False
- tos += lg
- #finals = froms + (" → %s → " % lang) + tos
- finals = tos + (" → %s → " % lang) + froms
-
- phenny.say(finals)
+ """Lists translation pairs available to apertium translation"""
+ lang = input.group(2)
+
+ opener = urllib.request.build_opener()
+ opener.addheaders = headers
+
+ response = opener.open('http://api.apertium.org/json/listPairs').read()
+
+ langs = json.loads(response.decode('utf-8'))
+
+ langs = json.loads(response.decode('utf-8'))
+ if langs['responseData'] is []:
+ raise GrumbleError(APIerrorData)
+ if int(langs['responseStatus']) != 200:
+ raise GrumbleError(APIerrorHttp % (langs['responseStatus'], langs['responseDetails']))
+
+ if not lang:
+ allpairs=""
+ first=True
+ for pair in langs['responseData']:
+ if not first:
+ allpairs+=","
+ else:
+ first=False
+ allpairs+="%s→%s" % (pair['sourceLanguage'], pair['targetLanguage'])
+ phenny.say(allpairs)
+ else:
+ toLang = []
+ fromLang = []
+ for pair in langs['responseData']:
+ if pair['sourceLanguage'] == lang:
+ fromLang.append(pair['targetLanguage'])
+ if pair['targetLanguage'] == lang:
+ toLang.append(pair['sourceLanguage'])
+ first=True
+ froms = ""
+ for lg in fromLang:
+ if not first:
+ froms += ", "
+ else:
+ first = False
+ froms += lg
+ first = True
+ tos = ""
+ for lg in toLang:
+ if not first:
+ tos += ", "
+ else:
+ first = False
+ tos += lg
+ #finals = froms + (" → %s → " % lang) + tos
+ finals = tos + (" → %s → " % lang) + froms
+
+ phenny.say(finals)
apertium_listpairs.name = 'listpairs'
apertium_listpairs.commands = ['listpairs']
From 7424f42fbe433db9545d874a1567dd5db7d2caa2 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 20:05:46 -0500
Subject: [PATCH 06/27] added .botslap
---
modules/botsnack.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/modules/botsnack.py b/modules/botsnack.py
index 485eabeb3..7ae4b47d1 100644
--- a/modules/botsnack.py
+++ b/modules/botsnack.py
@@ -106,5 +106,14 @@ def botsnack(phenny, input):
botsnack.last_tick = time.time()
botsnack.coolingdown = False
+def botslap(phenny, input):
+ """tell me I'm being a bad bot"""
+ messages = ["hides in corner", "eats own hat", "apologises", "stares at feet", "points at zfe", "didn't do anything", "doesn't deserve this", "hates you guys", "did it on purpose", "is an inconsistent sketchy little bot", "scurries off"]
+ phenny.do(random.choice(messages))
+
+botslap.commands = ['botslap', 'botsmack']
+botslap.rule = r'(?i)(?:$nickname[,:]? )?(you suck|I hate you|you ruin everything|you spoil all [themyour]*fun|bad|wtf|lame|[youare\']*stupid|silly)(?:[,]? $nickname)?[ \t]*$'
+botsnack.priority = 'low'
+
if __name__ == '__main__':
print(__doc__.strip())
From 59cad85eeb291555c00d4ae79c9b16232047c851 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 20:07:50 -0500
Subject: [PATCH 07/27] Added .py
---
modules/calc.py | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/modules/calc.py b/modules/calc.py
index 0b715a8ad..28e4fd600 100644
--- a/modules/calc.py
+++ b/modules/calc.py
@@ -47,6 +47,16 @@ def c(phenny, input):
c.commands = ['c']
c.example = '.c 5 + 3'
+def py(phenny, input):
+ """evaluates a python2 expression via a remote sandbox"""
+ query = input.group(2).encode('utf-8')
+ uri = 'http://tumbolia.appspot.com/py/'
+ answer = web.get(uri + web.quote(query))
+ if answer:
+ phenny.say(answer)
+ else: phenny.reply('Sorry, no result.')
+py.commands = ['py']
+py.example = '.py if not False: print "hello world!"'
def wa(phenny, input):
if not input.group(2):
From 00e1a1ad4be0287a00da3a0c17553b14530108d8 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 20:11:55 -0500
Subject: [PATCH 08/27] Updated .time to the newer version of phenny
---
modules/clock.py | 35 +++++++++++++++++------------------
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/modules/clock.py b/modules/clock.py
index ad09aefac..d7597637f 100644
--- a/modules/clock.py
+++ b/modules/clock.py
@@ -32,7 +32,7 @@
'EDT': -4, 'UT': 0, 'PST': -8, 'MEZ': 1, 'BST': 1,
'ACS': 9.5, 'ATL': -4, 'ALA': -9, 'HAW': -10, 'AKDT': -8,
'AKST': -9,
- 'BDST': 2}
+ 'BDST': 2, 'KGT': 6}
TZ1 = {
'NDT': -2.5,
@@ -200,40 +200,39 @@
r_local = re.compile(r'\([a-z]+_[A-Z]+\)')
-@deprecated
-def f_time(self, origin, match, args):
+def f_time(phenny, input):
"""Returns the current time."""
- tz = match.group(2) or 'GMT'
+ tz = input.group(2) or 'GMT'
# Personal time zones, because they're rad
- if hasattr(self.config, 'timezones'):
- People = self.config.timezones
+ if hasattr(phenny.config, 'timezones'):
+ People = phenny.config.timezones
else: People = {}
if tz in People:
tz = People[tz]
- elif (not match.group(2)) and origin.nick in People:
- tz = People[origin.nick]
+ elif (not input.group(2)) and input.nick in People:
+ tz = People[input.nick]
TZ = tz.upper()
if len(tz) > 30: return
if (TZ == 'UTC') or (TZ == 'Z'):
msg = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
- self.msg(origin.sender, msg)
+ phenny.reply(msg)
elif r_local.match(tz): # thanks to Mark Shoulsdon (clsn)
locale.setlocale(locale.LC_TIME, (tz[1:-1], 'UTF-8'))
msg = time.strftime("%A, %d %B %Y %H:%M:%SZ", time.gmtime())
- self.msg(origin.sender, msg)
+ phenny.reply(msg)
elif TZ in TimeZones:
offset = TimeZones[TZ] * 3600
timenow = time.gmtime(time.time() + offset)
msg = time.strftime("%a, %d %b %Y %H:%M:%S " + str(TZ), timenow)
- self.msg(origin.sender, msg)
+ phenny.reply(msg)
elif tz and tz[0] in ('+', '-') and 4 <= len(tz) <= 6:
timenow = time.gmtime(time.time() + (int(tz[:3]) * 3600))
msg = time.strftime("%a, %d %b %Y %H:%M:%S " + str(tz), timenow)
- self.msg(origin.sender, msg)
+ phenny.reply(msg)
else:
try: t = float(tz)
except ValueError:
@@ -242,17 +241,17 @@ def f_time(self, origin, match, args):
if r_tz.match(tz) and os.path.isfile('/usr/share/zoneinfo/' + tz):
cmd, PIPE = 'TZ=%s date' % tz, subprocess.PIPE
proc = subprocess.Popen(cmd, shell=True, stdout=PIPE)
- self.msg(origin.sender, proc.communicate()[0])
+ phenny.reply(proc.communicate()[0])
else:
error = "Sorry, I don't know about the '%s' timezone." % tz
- self.msg(origin.sender, origin.nick + ': ' + error)
+ phenny.reply(error)
else:
timenow = time.gmtime(time.time() + (t * 3600))
msg = time.strftime("%a, %d %b %Y %H:%M:%S " + str(tz), timenow)
- self.msg(origin.sender, msg)
-f_time.commands = ['t']
-f_time.name = 't'
-f_time.example = '.t UTC'
+ phenny.reply(msg)
+f_time.name = 'time'
+f_time.commands = ['time']
+f_time.example = '.time UTC'
def beats(phenny, input):
"""Shows the internet time in Swatch beats."""
From 950138975ba3d8c4503e05cf322cc97eb731bebf Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 20:21:42 -0500
Subject: [PATCH 09/27] Added .ethnologue, language lookup (scrapes language
codes at startup, may want to disable if unneeded)
---
modules/ethnologue.py | 139 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 139 insertions(+)
create mode 100644 modules/ethnologue.py
diff --git a/modules/ethnologue.py b/modules/ethnologue.py
new file mode 100644
index 000000000..da8d0c354
--- /dev/null
+++ b/modules/ethnologue.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python3
+"""
+ethnologue.py - Ethnologue.com language lookup
+author: mattr555
+"""
+
+#from modules.iso639 import ISOcodes
+from lxml import html
+from string import ascii_lowercase
+import os
+import web
+
+def shorten_num(n):
+ if n < 1000:
+ return '{:,}'.format(n)
+ elif n < 1000000:
+ return '{}K'.format(str(round(n/1000, 1)).rstrip('0').rstrip('.'))
+ elif n < 1000000000:
+ return '{}M'.format(str(round(n/1000000, 1)).rstrip('0').rstrip('.'))
+
+def scrape_ethnologue_codes():
+ data = {}
+ base_url = 'http://www.ethnologue.com/browse/codes/'
+ for letter in ascii_lowercase:
+ resp = web.get(base_url + letter)
+ h = html.document_fromstring(resp)
+ for e in h.find_class('views-field-field-iso-639-3'):
+ code = e.find('div/a').text
+ name = e.find('div/a').attrib['title']
+ data[code] = name
+ return data
+
+def filename(phenny):
+ name = phenny.nick + '-' + phenny.config.host + '.ethnologue.db'
+ return os.path.join(os.path.expanduser('~/.phenny'), name)
+
+def write_ethnologue_codes(phenny, raw=None):
+ if raw is None or raw.admin:
+ file = filename(phenny)
+ data = scrape_ethnologue_codes()
+ with open(file, 'w') as f:
+ for k, v in data.items():
+ f.write('{}${}\n'.format(k, v))
+ phenny.ethno_data = data
+ print('Ethnologue iso-639 code fetch successful')
+ if raw:
+ phenny.say('Ethnologue iso-639 code fetch successful')
+ else:
+ phenny.say('Only admins can execute that command!')
+
+write_ethnologue_codes.name = 'write_ethnologue_codes'
+write_ethnologue_codes.commands = ['write-ethno-codes']
+write_ethnologue_codes.priority = 'low'
+
+def read_ethnologue_codes(phenny, raw=None):
+ file = filename(phenny)
+ data = {}
+ with open(file, 'r') as f:
+ for line in f.readlines():
+ code, name = line.split('$')
+ data[code] = name
+ phenny.ethno_data = data
+ print('Ethnologue iso-639 database read successful')
+
+def parse_num_speakers(s):
+ hits = []
+ for i in s.split(' '):
+ if len(i) <= 3 or ',' in i:
+ if i.replace(',', '').replace('.', '').isdigit():
+ hits.append(int(i.replace(',', '').replace('.', '')))
+ if 'ethnic population' in s.lower():
+ return shorten_num(hits[0])
+ return shorten_num(hits[-1])
+
+def ethnologue(phenny, input):
+ """.ethnologue - gives ethnologue info from partial language name or iso639"""
+ raw = str(input.group(2)).lower()
+ iso = []
+ if len(raw) == 3 and raw in phenny.ethno_data:
+ iso.append(raw)
+ elif len(raw) > 3:
+ for code, lang in phenny.ethno_data.items():
+ if raw in lang.lower():
+ iso.append(code)
+
+ if len(iso) == 1:
+ url = "http://www.ethnologue.com/language/" + iso[0]
+ try:
+ resp = web.get(url)
+ except web.HTTPError as e:
+ phenny.say('Oh noes! Ethnologue responded with ' + str(e.code) + ' ' + e.msg)
+ return
+ h = html.document_fromstring(resp)
+
+ if "macrolanguage" in h.find_class('field-name-a-language-of')[0].find('div/div/h2').text:
+ name = h.get_element_by_id('page-title').text
+ iso_code = h.find_class('field-name-language-iso-link-to-sil-org')[0].find('div/div/a').text
+ num_speakers_field = h.find_class('field-name-field-population')[0].find('div/div/p').text
+ num_speakers = parse_num_speakers(num_speakers_field)
+ child_langs = map(lambda e:e.text[1:-1], h.find_class('field-name-field-comments')[0].findall('div/div/p/a'))
+ response = "{} ({}) is a macrolanguage with {} speakers and the following languages: {}. Src: {}".format(
+ name, iso_code, num_speakers, ', '.join(child_langs), url)
+ else:
+ name = h.get_element_by_id('page-title').text
+ iso_code = h.find_class('field-name-language-iso-link-to-sil-org')[0].find('div/div/a').text
+ where_spoken = h.find_class('field-name-a-language-of')[0].find('div/div/h2/a').text
+ where_spoken_cont = h.find_class('field-name-field-region')
+ if where_spoken_cont:
+ where_spoken_cont = where_spoken_cont[0].find('div/div/p').text[:100]
+ if len(where_spoken_cont) > 98:
+ where_spoken_cont += '...'
+ where_spoken += ', ' + where_spoken_cont
+ if where_spoken[-1] != '.':
+ where_spoken += '.'
+ num_speakers_field = h.find_class('field-name-field-population')[0].find('div/div/p').text
+ num_speakers = parse_num_speakers(num_speakers_field)
+ language_status = h.find_class('field-name-language-status')[0].find('div/div/p').text.split('.')[0] + '.'
+
+ response = "{} ({}): spoken in {} {} speakers. Status: {} Src: {}".format(
+ name, iso_code, where_spoken, num_speakers, language_status, url)
+ elif len(iso) > 1:
+ did_you_mean = ['{} ({})'.format(i, phenny.ethno_data[i]) for i in iso if len(i) == 3]
+ response = "Try .iso639 for better results. Did you mean: " + ', '.join(did_you_mean) + "?"
+ else:
+ response = "That ISO code wasn't found. (Hint: use .iso639 for better results)"
+
+ phenny.say(response)
+
+ethnologue.name = 'ethnologue'
+ethnologue.commands = ['ethnologue', 'ethno', 'logue', 'lg', 'eth']
+ethnologue.example = '.ethnologue khk'
+ethnologue.priority = 'low'
+
+def setup(phenny):
+ file = filename(phenny)
+ if os.path.exists(file):
+ read_ethnologue_codes(phenny)
+ else:
+ write_ethnologue_codes(phenny)
From 787ffa3cfb9d924c8cc00ea09ee408fa6d5d9b7c Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 20:51:38 -0500
Subject: [PATCH 10/27] Added eleda, a translation follower .follow
to follow someone phenny will pm you translations as the user
speaks
---
modules/eleda.py | 151 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 151 insertions(+)
create mode 100644 modules/eleda.py
diff --git a/modules/eleda.py b/modules/eleda.py
new file mode 100644
index 000000000..d12fb44f8
--- /dev/null
+++ b/modules/eleda.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+
+"""
+eleda.py - Begiak Eleda Module
+Any questions can go to Qasim Iqbal (nick: Qasim) (email: me@qas.im)
+"""
+
+import http.client
+import re, json
+import sys
+#from apertium_translate import translate
+import urllib.request, urllib.parse, urllib.error
+import web
+from tools import GrumbleError
+
+follows = []
+
+headers = {
+ 'User-Agent': 'Mozilla/5.0' + '(X11; U; Linux i686)' + 'Gecko/20071127 Firefox/2.0.0.11'
+}
+
+APIerrorData = 'Sorry, the apertium API did not return any data ☹'
+APIerrorHttp = 'Sorry, the apertium API gave HTTP error %s: %s ☹'
+
+def translate(translate_me, input_lang, output_lang='en'):
+ opener = urllib.request.build_opener()
+ opener.addheaders = headers
+
+ input_lang, output_lang = web.quote(input_lang), web.quote(output_lang)
+ translate_me = web.quote(translate_me)
+
+ response = opener.open('http://api.apertium.org/json/translate?q='+translate_me+'&langpair='+input_lang+"|"+output_lang).read()
+
+ responseArray = json.loads(response.decode('utf-8'))
+ if int(responseArray['responseStatus']) != 200:
+ raise GrumbleError(APIerrorHttp % (responseArray['responseStatus'], responseArray['responseDetails']))
+ if responseArray['responseData']['translatedText'] == []:
+ raise GrumbleError(APIerrorData)
+
+ translated_text = responseArray['responseData']['translatedText']
+ return translated_text
+
+class Eleda(object):
+ sender = ""
+ nick = ""
+ dir = []
+ def __init__(self, sender, nick, dir):
+ self.sender = sender
+ self.nick = nick
+ self.dir = dir
+
+def get_page(domain, url, encoding='utf-8'): #get the HTML of a webpage.
+ conn = http.client.HTTPConnection(domain, 80, timeout=60)
+ conn.request("GET", url, headers=headers)
+ res = conn.getresponse()
+ return res.read().decode(encoding)
+
+def follow(phenny, input): #follow a user
+ """Follow someone and translate as they speak."""
+ global follows
+
+ if input.groups()[1] != None:
+ data = input.group(2).split(' ')
+ nick = data[0]
+
+ if nick.lower() == phenny.config.nick.lower():
+ phenny.reply(phenny.config.nick.upper() + " DOES NOT LIKE TO BE FOLLOWED.")
+ return
+
+ try:
+ dir = data[1].split('-')
+ dir[1] = dir[1]
+ except:
+ phenny.reply("Need language pair!")
+ return
+
+ pairs = get_page('api.apertium.org', '/json/listPairs')
+ if '{"sourceLanguage":"'+dir[0]+'","targetLanguage":"'+dir[1]+'"}' not in pairs:
+ phenny.reply("That language pair does not exist!")
+ return
+
+ if len(data) in [2,3]:
+ sender = input.nick
+ if len(data) == 3 and input.admin == True:
+ #only accept follower paramter if it exists and the nick is admin
+ sender = data[2]
+ else:
+ phenny.reply("Unexpected error.")
+ return
+
+ for i in follows:
+ if i.nick == nick and i.dir == dir and i.sender == sender:
+ phenny.say(sender + " is already following " + nick + " with " + '-'.join(dir) + '.')
+ return
+
+ follows.append(Eleda(sender, nick, dir))
+ phenny.reply(sender + " now following " + nick + " (" + '-'.join(dir) + ").")
+ else:
+ phenny.reply("Need nick and language pair!")
+
+def unfollow(phenny, input): #unfollow a user
+ """Stop following someone."""
+ global follows
+
+ following = False
+ for i in range(len(follows)):
+ if follows[i].nick == input.groups()[1] and follows[i].sender == input.nick:
+ #if this person is indeed being followed (and is indeed the creator of the follow)
+ follows[i] = Eleda('', '', ['', ''])
+ following = True
+ if following == True:
+ phenny.reply(input.groups()[1] + " is no longer being followed.")
+ else:
+ phenny.reply("Sorry, you aren't following that user!")
+
+def following(phenny, input): #list followed users
+ """List people currently being followed."""
+ text = []
+ for i in follows:
+ if i.nick != '':
+ #populate list with following list
+ text.append(i.nick + " (" + '-'.join(i.dir) + ") by " + i.sender)
+ if len(text) < 1:
+ phenny.reply("No one is being followed at the moment.")
+ else:
+ phenny.say('Users currently being followed: ' + ', '.join(text) + '. (Translations are private)')
+
+def test(phenny, input): #filter through each message in the channel
+ if '#' in input.sender:
+ if input.groups()[0][0] == '.' or phenny.config.nick.lower() in input.groups()[0].split(' ')[0].lower():
+ #do not translate if it is a begiak function
+ return
+
+ for i in follows:
+ if i.nick != '':
+ if i.nick == input.nick:
+ #this user is being followed, translate them
+ direction = '-'.join(i.dir)
+ translation = translate(input.group(0), i.dir[0], i.dir[1])
+ translation = translation.replace('*', '')
+ if translation != input.group(0):
+ #don't bother sending a notice if the input is the same as the output
+ phenny.write(['NOTICE', i.sender], i.nick + ' (' + '-'.join(i.dir) + '): ' + translation)
+
+follow.commands = ['follow']
+follow.example = '.follow Qasim en-es'
+unfollow.commands = ['unfollow']
+unfollow.example = '.unfollow Qasim'
+following.commands = ['following']
+following.example = '.following'
+test.rule = r'(.*)'
\ No newline at end of file
From 061ee515257d857ead5753a2c9bbfd915391e0ce Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 20:52:41 -0500
Subject: [PATCH 11/27] Get title 404 fix take 2
---
modules/head.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/modules/head.py b/modules/head.py
index 258d932ec..bcafa48e9 100644
--- a/modules/head.py
+++ b/modules/head.py
@@ -158,7 +158,10 @@ def gettitle(phenny, uri):
if not (('/html' in mtype) or ('/xhtml' in mtype)):
return None
- bytes = web.get(uri)
+ try:
+ bytes = web.get(uri)
+ except web.HTTPError:
+ return None
#bytes = u.read(262144)
#u.close()
From 373cae0a44b7ca1e61cd763f2fbfe3f2529dfeed Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 21:23:40 -0500
Subject: [PATCH 12/27] Added github.py: new commit notifier to use, add your
git repositories to config.git_repositories, where the key is the name and
the value is the url (for github, use api.github.com) then, add a hook to
post_commit for pushing info to phenny's listener port (default 1234)
currently supported: github, bitbucket, google code
---
modules/github.py | 293 ++++++++++++++++++++++++++++++++++++++++++++++
tools.py | 41 +++++++
2 files changed, 334 insertions(+)
create mode 100644 modules/github.py
diff --git a/modules/github.py b/modules/github.py
new file mode 100644
index 000000000..06b5ba09e
--- /dev/null
+++ b/modules/github.py
@@ -0,0 +1,293 @@
+#!/usr/bin/env python
+"""
+github.py - Github Post-Receive Hooks Module
+"""
+
+import http.server
+import socketserver
+import urllib.parse
+import json
+import re
+import os
+import web
+from io import StringIO
+from tools import generate_report
+import time
+
+PORT = 1234
+
+Handler = None
+httpd = None
+
+
+class MyHandler(http.server.SimpleHTTPRequestHandler):
+ phenny = None
+ phInput = None
+
+ def return_data(self, site, data, commit):
+ #hrm, if I'm storing fields in a list in python, but I have something that has complex fields (e.g., data['foo']['bar']), is there some way to write a function that'll
+ #fields['github'] = ['phenny', data['pusher']['name'], commit['message'], commit['modified'], commit['added'], commit['removed'], commit['id'][:7]))
+ if site=="github":
+ name = "phenny"
+ author = data['pusher']['name']
+ message = commit['message']
+ modified = commit['modified']
+ added = commit['added']
+ removed = commit['removed']
+ rev = commit['id'][:7]
+ elif site=="googlecode":
+ name = data['project_name']
+ author = commit['author']
+ message = commit['message']
+ modified = commit['modified']
+ added = commit['added']
+ removed = commit['removed']
+ rev = commit['revision']
+ elif site=="bitbucket":
+ files = self.getBBFiles(commit['files'])
+ name = 'turkiccorpora'
+ author = commit['author']
+ message = commit['message']
+ modified = files['modified']
+ added = files['added']
+ removed = files['removed']
+ rev = commit['node']
+ return generate_report(name, author, message, modified, added, removed, rev)
+
+
+ def do_GET(self):
+ parsed_params = urllib.parse.urlparse(self.path)
+ query_parsed = urllib.parse.parse_qs(parsed_params.query)
+ #self.phenny.say("GET request on port %s: %s" % (PORT, str(query_parsed)))
+ self.send_response(403)
+
+ def do_POST(self):
+ length = int(self.headers['Content-Length'])
+ indata = self.rfile.read(length)
+ #print("indata: "+str(indata))
+ #print("headers: "+str(self.headers))
+ post_data = urllib.parse.parse_qs(indata.decode('utf-8'))
+ if len(post_data) == 0:
+ post_data = indata.decode('utf-8')
+
+ #try:
+ # payload = query_parsed['payload'][0]
+ #except KeyError:
+ #self.phenny.say("Something went wrong with getting the data. WHAT.")
+ #self.send_response(403)
+ #return
+ #self.phenny.say(post_data['payload'][0])
+ if "payload" in post_data:
+ data = json.loads(post_data['payload'][0])
+ else:
+ #print(post_data)
+ data = json.loads(post_data)
+ #print(data)
+
+ msgs = []
+ if "commits" in data:
+ for commit in data['commits']:
+ try:
+ if "committer" in commit:
+ ## For github
+ # msgs.append(generate_report('phenny', data['pusher']['name'], commit['message'], commit['modified'], commit['added'], commit['removed'], commit['id'][:7]))
+ msgs.append(self.return_data("github", data, commit))
+ #elif "pusher" in data:
+ ## for google code
+ # for commit in data['revisions']:
+ # msgs.append(self.return_data(self, "github", data, commit))
+ # #msgs.append()
+
+ elif "author" in commit:
+ ## For bitbucket
+ #msgs.append("unsupported data: "+str(commit))
+ files = self.getBBFiles(commit['files'])
+ msgs.append(generate_report('turkiccorpora', commit['author'], commit['message'], files['modified'], files['added'], files['removed'], commit['node']))
+ else:
+ msgs.append("unsupported data: "+str(commit))
+ except Exception:
+ #msgs.append("unsupported data: "+str(commit))
+ print("unsupported data: "+str(commit))
+ elif "project_name" in data:
+ # for google code
+ for commit in data['revisions']:
+ msgs.append(self.return_data("googlecode", data, commit))
+ #msgs.append()
+
+ if len(msgs)==0:
+ msgs = ["Something went wrong: "+str(data.keys())]
+ for msg in msgs:
+ for chan in self.phInput.chans:
+ self.phenny.bot.msg(chan, msg)
+
+ self.send_response(200)
+
+ def getBBFiles(self, filelist):
+ toReturn = {"added": [], "modified": [], "removed": []}
+ for onefile in filelist:
+ toReturn[onefile['type']].append(onefile['file'])
+ return toReturn
+
+def setup_server(phenny, input):
+ global Handler, httpd
+ Handler = MyHandler
+ Handler.phenny = phenny
+ Handler.phInput = input
+ httpd = socketserver.TCPServer(("", PORT), Handler)
+ phenny.say("Server is up and running on port %s" % PORT)
+ httpd.serve_forever()
+
+def github(phenny, input):
+ global Handler, httpd
+ if Handler is None and httpd is None:
+ #if input.admin:
+ if httpd is not None:
+ httpd.shutdown()
+ httpd = None
+ if Handler is not None:
+ Handler = None
+ setup_server(phenny, input)
+ #else:
+ # phenny.reply("That is an admin-only command.")
+#github.name = 'startserver'
+#github.commands = ['startserver']
+##github.event = 'PRIVMSG'
+##github.rule = r'.*'
+github.name = 'start githook server'
+github.event = "PONG"
+github.rule = r'.*'
+github.priority = 'medium'
+
+
+def stopserver(phenny, input):
+ global Handler, httpd
+ if input.admin:
+ if httpd is not None:
+ httpd.shutdown()
+ httpd = None
+ Handler = None
+ phenny.say("Server has stopped on port %s" % PORT)
+ else:
+ phenny.reply("That is an admin-only command.")
+stopserver.commands = ['stopserver']
+
+def gitserver(phenny, input):
+ ''' control git server '''
+ global Handler, httpd
+ command = input.group(1).strip()
+ if input.admin:
+ if command=="stop":
+ if httpd is not None:
+ httpd.shutdown()
+ httpd.socket.close()
+ httpd = None
+ Handler = None
+ phenny.say("Server has stopped on port %s" % PORT)
+ else:
+ phenny.say("Server is already down!")
+ if command=="start":
+ if httpd is None:
+ Handler = MyHandler
+ Handler.phenny = phenny
+ Handler.phInput = input
+ httpd = socketserver.TCPServer(("", PORT), Handler)
+ phenny.say("Server is up and running on port %s" % PORT)
+ httpd.serve_forever()
+ else:
+ phenny.say("Server is already up!")
+ if command=="status":
+ if httpd is None:
+ if input.admin:
+ phenny.say("Server is down! Start using '.gitserver start'")
+ else:
+ phenny.say("Server is down! (only admin can start it)")
+ else:
+ if input.admin:
+ phenny.say("Server is up! Stop using '.gitserver stop'")
+ else:
+ phenny.say("Server is up and running (only admins can shut it down)")
+ else:
+ if command=="status":
+ if httpd is None:
+ if input.admin:
+ phenny.say("Server is down! Start using '.gitserver start'")
+ else:
+ phenny.say("Server is down! (only admin can start it)")
+ else:
+ if input.admin:
+ phenny.say("Server is up! Stop using '.gitserver stop'")
+ else:
+ phenny.say("Server is up and running (only admins can shut it down)")
+ else:
+ phenny.reply("Only admins control gitserver.")
+
+
+gitserver.name = "gitserver"
+gitserver.rule = ('.gitserver', '(.*)')
+
+def get_commit_info(phenny, repo, sha):
+ repoUrl = phenny.config.git_repositories[repo]
+ #print(repoUrl)
+ if repoUrl.find("code.google.com") >= 0:
+ locationurl = '/source/detail?r=%s'
+ elif repoUrl.find("api.github.com") >= 0:
+ locationurl = '/commits/%s'
+ elif repoUrl.find("bitbucket.org") >=0:
+ locationurl = ''
+ #print(locationurl)
+ html = web.get(repoUrl + locationurl % sha)
+ data = json.loads(html)
+ author = data['commit']['committer']['name']
+ comment = data['commit']['message']
+
+ modified_paths = []
+ added_paths = []
+ removed_paths = []
+
+ for file in data['files']:
+ if file['status'] == 'modified':
+ modified_paths.append(file['filename'])
+ elif file['status'] == 'added':
+ added_paths.append(file['filename'])
+ elif file['status'] == 'removed':
+ removed_paths.append(file['filename'])
+ rev = sha[:7]
+ date = time.strptime(data['commit']['committer']['date'], "%Y-%m-%dT%H:%M:%SZ")
+ date = time.strftime("%d %b %Y %H:%M:%S", date)
+ return author, comment, modified_paths, added_paths, removed_paths, rev, date
+
+def get_recent_commit(phenny, input):
+ for repo in phenny.config.git_repositories:
+ html = web.get(phenny.config.git_repositories[repo] + '/commits')
+ data = json.loads(html)
+ author, comment, modified_paths, added_paths, removed_paths, rev, date = get_commit_info(phenny, repo, data[0]['sha'])
+ msg = generate_report(repo, author, comment, modified_paths, added_paths, removed_paths, rev, date)
+ phenny.say(msg)
+get_recent_commit.rule = ('$nick', 'recent')
+get_recent_commit.priority = 'medium'
+get_recent_commit.thread = True
+
+def retrieve_commit(phenny, input):
+ data = input.group(1).split(' ')
+
+ if len(data) != 2:
+ phenny.reply("Invalid number of parameters.")
+ return
+
+ repo = data[0]
+ rev = data[1]
+
+ if repo in phenny.config.svn_repositories:
+ return
+
+ if repo not in phenny.config.git_repositories:
+ phenny.reply("That repository is not monitored by me!")
+ return
+ try:
+ author, comment, modified_paths, added_paths, removed_paths, rev, date = get_commit_info(phenny, repo, rev)
+ except:
+ phenny.reply("Invalid revision value!")
+ return
+ msg = generate_report(repo, author, comment, modified_paths, added_paths, removed_paths, rev, date)
+ phenny.say(msg)
+retrieve_commit.rule = ('$nick', 'info(?: +(.*))')
diff --git a/tools.py b/tools.py
index d3a659e18..d473f2c09 100755
--- a/tools.py
+++ b/tools.py
@@ -7,6 +7,8 @@
http://inamidst.com/phenny/
"""
+import re
+import os
class GrumbleError(Exception):
pass
@@ -27,5 +29,44 @@ def new(phenny, input, old=old):
new.__name__ = old.__name__
return new
+def generate_report(repo, author, comment, modified_paths, added_paths, removed_paths, rev, date=""):
+ paths = modified_paths + added_paths + removed_paths
+ if comment is None:
+ comment = "No commit message provided!"
+ else:
+ comment = re.sub("[\n\r]+", " ␍ ", comment.strip())
+
+ basepath = os.path.commonprefix(paths)
+ if basepath[-1] != "/":
+ basepath = basepath.split("/")
+ basepath.pop()
+ basepath = '/'.join(basepath) + "/"
+
+ text_paths = []
+ if len(paths) > 0:
+ for path in paths:
+ addition = ""
+ if path in added_paths:
+ addition = " (+)"
+ elif path in removed_paths:
+ addition = " (-)"
+ text_paths.append(os.path.relpath(path, basepath) + addition)
+ if len(text_paths) > 1:
+ if len(text_paths) <= 3:
+ final_path = "%s: %s" % (basepath, ', '.join(text_paths))
+ else:
+ final_path = "%s: %s" % (basepath, ', '.join([text_paths[0], text_paths[1]]) + ' and %s other files' % str(len(text_paths) - 2))
+ else:
+ final_path = paths[0]
+ if final_path in added_paths:
+ final_path += " (+)"
+ elif final_path in removed_paths:
+ final_path += " (-)"
+ if date == "":
+ msg = "%s: %s * %s: %s: %s" % (repo, author, rev, final_path, comment.strip())
+ else:
+ msg = "[%s] %s: %s * %s: %s: %s" % (date, repo, author, rev, final_path, comment.strip())
+ return msg
+
if __name__ == '__main__':
print(__doc__.strip())
From f8354a4fdaad0937ad47c23980a8d7e152909367 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 21:24:05 -0500
Subject: [PATCH 13/27] get title non-string fix
---
modules/head.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/head.py b/modules/head.py
index bcafa48e9..d7d204b88 100644
--- a/modules/head.py
+++ b/modules/head.py
@@ -160,7 +160,7 @@ def gettitle(phenny, uri):
try:
bytes = web.get(uri)
- except web.HTTPError:
+ except:
return None
#bytes = u.read(262144)
#u.close()
From a123d4c6580540c2dc4855743375056a38ff110b Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 21:30:51 -0500
Subject: [PATCH 14/27] Added .iso639- language code lookup this module
requires ethnologue.py to run (scrapes language codes at startup, may want
to disable if unneeded)
---
bot.py | 2 +-
modules/iso639.py | 164 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 165 insertions(+), 1 deletion(-)
create mode 100644 modules/iso639.py
diff --git a/bot.py b/bot.py
index 4925acaf8..93eeba39a 100755
--- a/bot.py
+++ b/bot.py
@@ -231,7 +231,7 @@ def dispatch(self, origin, args):
if func.thread:
targs = (func, origin, phenny, input)
- t = threading.Thread(target=self.call, args=targs)
+ t = threading.Thread(target=self.call, args=targs, name=func.name)
t.start()
else: self.call(func, origin, phenny, input)
diff --git a/modules/iso639.py b/modules/iso639.py
new file mode 100644
index 000000000..3f866231e
--- /dev/null
+++ b/modules/iso639.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python3
+"""
+iso639.py - ISO codes module
+author: mutantmonkey
+"""
+
+#from tools import GrumbleError
+import random
+from modules.ethnologue import setup as ethno_setup
+from modules.ethnologue import write_ethnologue_codes
+from lxml import html
+import web
+import os
+import threading
+
+template = "%s = %s"
+
+def flatten(s):
+ #match against accented characters
+ my_copy = str(s)
+ flatten_mapping = {
+ 'a': 'áäâ',
+ 'e': 'éè',
+ 'i': 'íî',
+ 'o': 'óö',
+ 'u': 'ùüú',
+ 'n': 'ñ',
+ "'": '’'
+ }
+ for i in my_copy:
+ for k, v in flatten_mapping.items():
+ if i in v:
+ my_copy = my_copy.replace(i, k)
+
+ return my_copy
+
+
+def iso639(phenny, input):
+ """.iso639 | .iso639 - Search ISO 639-1, -2 and -3 for a language code."""
+ response = ""
+ thisCode = str(input.group(2)).lower()
+ if thisCode == "None":
+ thisCode = random.choice(list(phenny.iso_data.keys()))
+ #ISOcodes[random.randint(0,len(ISOcodes)-1)]
+ #random.choice(ISOcodes)
+ if thisCode in phenny.iso_data:
+ response = template % (thisCode, phenny.iso_data[thisCode])
+ else:
+ if len(thisCode) > 3: # so that we don't get e.g. 'a'
+ for oneCode, oneLang in phenny.iso_data.items():
+ if thisCode in flatten(oneLang.lower()):
+ if response != "":
+ response += ", " + template % (oneCode, oneLang)
+ else:
+ response = template % (oneCode, oneLang)
+ #phenny.say("%s %s %s" % (oneCode, oneLang.lower(), thisCode.lower()))
+ if response == "":
+ response = "Sorry, %s not found" % thisCode
+
+ phenny.say(response)
+
+def scrape_wiki_codes():
+ data = {}
+ base_url = 'http://en.wikipedia.org/wiki/List_of_ISO_639'
+ #639-1
+ resp = web.get(base_url + '-1_codes')
+ h = html.document_fromstring(resp)
+ table = h.find_class('wikitable')[0]
+ for row in table.findall('tr')[1:]:
+ name = row.findall('td')[2].find('a').text
+ code = row.findall('td')[4].text
+ data[code] = name
+ #639-2
+ resp = web.get(base_url + '-2_codes')
+ h = html.document_fromstring(resp)
+ table = h.find_class('wikitable')[0]
+ for row in table.findall('tr')[1:]:
+ name = row.findall('td')[3].find('a')
+ if name:
+ name = name.text
+ else:
+ continue
+ code_list = row.findall('td')[0].text.split(' ')
+ if len(code_list) == 1:
+ code = code_list[0]
+ else:
+ for i in code_list:
+ if '*' in i:
+ code = i.replace('*', '')
+ break
+ data[code] = name
+
+ return data
+
+def iso_filename(phenny):
+ name = phenny.nick + '-' + phenny.config.host + '.iso-codes.db'
+ return os.path.join(os.path.expanduser('~/.phenny'), name)
+
+def write_dict(filename, data):
+ with open(filename, 'w', encoding="utf-8") as f:
+ for k, v in data.items():
+ f.write('{}${}\n'.format(k, v))
+
+def read_dict(filename):
+ data = {}
+ with open(filename, 'r', encoding="utf-8") as f:
+ for line in f.readlines():
+ if line == '\n':
+ continue
+ code, name = line.replace('\n', '').split('$')
+ data[code] = name
+ return data
+
+def refresh_database(phenny, raw=None):
+ if raw.admin or raw is None:
+ f = iso_filename(phenny)
+ write_ethnologue_codes(phenny)
+ phenny.iso_data = scrape_wiki_codes()
+ phenny.iso_data.update(phenny.ethno_data)
+ write_dict(f, phenny.iso_data)
+ phenny.say('ISO code database successfully written')
+ else:
+ phenny.say('Only admins can execute that command!')
+
+def thread_check(phenny, raw):
+ for t in threading.enumerate():
+ if t.name == refresh_database.name:
+ phenny.say('An ISO code updating thread is currently running')
+ break
+ else:
+ phenny.say('No ISO code updating thread running')
+
+def setup(phenny):
+ ethno_setup(phenny) #populate ethnologue codes
+ f = iso_filename(phenny)
+ if os.path.exists(f):
+ try:
+ phenny.iso_data = read_dict(f)
+ except ValueError:
+ print('iso database read failed, refreshing it')
+ phenny.iso_data = scrape_wiki_codes()
+ phenny.iso_data.update(phenny.ethno_data)
+ write_dict(f, phenny.iso_data)
+ else:
+ phenny.iso_data = scrape_wiki_codes()
+ phenny.iso_data.update(phenny.ethno_data)
+ write_dict(f, phenny.iso_data)
+
+
+iso639.name = 'iso639'
+#iso639.rule = (['iso639'], r'(.*)')
+iso639.commands = ['iso639']
+iso639.example = '.iso639 khk'
+iso639.priority = 'low'
+
+refresh_database.name = 'refresh_iso_database'
+refresh_database.commands = ['isodb update']
+refresh_database.thread = True
+
+thread_check.name = 'iso_thread_check'
+thread_check.commands = ['isodb status']
+
+if __name__ == '__main__':
+ print(__doc__.strip())
From 96763ad6d8af6f792292e37d2ebe0b4970d17d2b Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 21:41:22 -0500
Subject: [PATCH 15/27] Added .lgmtfy
---
modules/search.py | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/modules/search.py b/modules/search.py
index f74decc7c..7bec48d0e 100644
--- a/modules/search.py
+++ b/modules/search.py
@@ -182,5 +182,17 @@ def suggest(phenny, input):
else: phenny.reply('Sorry, no result.')
suggest.commands = ['suggest']
+def lmgtfy(phenny, input):
+ if not input.group(2):
+ phenny.reply('.lmgtfy what f who?')
+ try:
+ (who, what) = input.group(2).split(' ', 1)
+ response = "%s: http://lmgtfy.com/?q=%s"
+ what = web.quote(what)
+ phenny.say(response % (who, what))
+ except ValueError:
+ phenny.reply('.lmgtfy what for who? (enter a nick and a query)')
+lmgtfy.commands = ['lmgtfy']
+
if __name__ == '__main__':
print(__doc__.strip())
From e710e637dde6f3193c46d594210fd2a219a40fd4 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 21:49:55 -0500
Subject: [PATCH 16/27] Updated .seen, added time since to response
---
modules/seen.py | 46 +++++++++++++++++++++++++++++++++++-----------
1 file changed, 35 insertions(+), 11 deletions(-)
diff --git a/modules/seen.py b/modules/seen.py
index e4dabbeaf..e369dc5d8 100644
--- a/modules/seen.py
+++ b/modules/seen.py
@@ -7,22 +7,24 @@
http://inamidst.com/phenny/
"""
-import time, os, shelve
+import time, os, shelve, datetime
from tools import deprecated
-@deprecated
-def f_seen(self, origin, match, args):
+def f_seen(phenny, input):
""".seen - Reports when was last seen."""
- nick = match.group(2).lower()
- if not hasattr(self, 'seen'):
- return self.msg(origin.sender, '?')
- if nick in self.seen:
- channel, t = self.seen[nick]
+ nick = input.group(2).lower()
+ if not hasattr(phenny, 'seen'):
+ return phenny.msg(input.sender, '?')
+ if nick in phenny.seen:
+ channel, t = phenny.seen[nick]
+ dt = timesince(datetime.datetime.utcfromtimestamp(t))
t = time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(t))
- msg = "I last saw %s at %s on %s" % (nick, t, channel)
- self.msg(origin.sender, str(origin.nick) + ': ' + msg)
- else: self.msg(origin.sender, "Sorry, I haven't seen %s around." % nick)
+ msg = "I last saw %s at %s (%s) on %s" % (nick, t, dt, channel)
+ phenny.reply(msg)
+ else: phenny.reply("Sorry, I haven't seen %s around." % nick)
+f_seen.name = 'seen'
+f_seen.example = '.seen firespeaker'
f_seen.rule = (['seen'], r'(\S+)')
@deprecated
@@ -41,5 +43,27 @@ def note(self, origin, match, args):
f_note.rule = r'(.*)'
f_note.priority = 'low'
+def timesince(td):
+ seconds = int(abs(datetime.datetime.utcnow() - td).total_seconds())
+ periods = [
+ ('year', 60*60*24*365),
+ ('month', 60*60*24*30),
+ ('day', 60*60*24),
+ ('hour', 60*60),
+ ('minute', 60),
+ ('second', 1)
+ ]
+
+ strings = []
+ for period_name, period_seconds in periods:
+ if seconds > period_seconds and len(strings) < 2:
+ period_value, seconds = divmod(seconds, period_seconds)
+ if period_value == 1:
+ strings.append("%s %s" % (period_value, period_name))
+ else:
+ strings.append("%s %ss" % (period_value, period_name))
+
+ return "just now" if len(strings) < 1 else " and ".join(strings) + " ago"
+
if __name__ == '__main__':
print(__doc__.strip())
From 88b2e39bc9687a1985f7ddd3adda541653ba1668 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 21:53:49 -0500
Subject: [PATCH 17/27] Added pm functionality to tell command put **pm** in
message to have the message sent privately
---
modules/tell.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/modules/tell.py b/modules/tell.py
index 7249fd536..0587a0752 100644
--- a/modules/tell.py
+++ b/modules/tell.py
@@ -137,7 +137,11 @@ def message(phenny, input):
reminders.extend(getReminders(phenny, channel, remkey, tellee))
for line in reminders[:maximum]:
- phenny.say(line)
+ if "**pm**" in line:
+ line = line.replace("**pm**", "")
+ phenny.msg(tellee, line)
+ else:
+ phenny.say(line)
if reminders[maximum:]:
phenny.say('Further messages sent privately')
From ba3494f319f4e342cfa51e30ad2806a0abdd4c82 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 21:55:39 -0500
Subject: [PATCH 18/27] Added .fight, .hug
---
modules/weirdfun.py | 49 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 49 insertions(+)
create mode 100644 modules/weirdfun.py
diff --git a/modules/weirdfun.py b/modules/weirdfun.py
new file mode 100644
index 000000000..816e89f85
--- /dev/null
+++ b/modules/weirdfun.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python3
+"""
+weirdfun.py - activities that weird people do
+author: firespeaker
+"""
+
+import random
+
+otherweird = "zfe"
+
+def fight(phenny, input):
+ """Have begiak fight someone for you."""
+ global otherweird
+ whouser = input.groups()[1]
+ already = False
+ if whouser:
+ otherweird = whouser
+ if whouser.lower()==phenny.nick.lower():
+ already = True
+ phenny.say("ouch!")
+
+ #### "hits %s", "punches %s", "kicks %s",, "stabs %s with a clean kitchen knife", "hits %s with a rubber hose",
+ messages = [ "hurts himself by accident while trying to attack %s", "directs his Öflazers at %s", "is bored of violence against %s", "thinks you should talk it over with %s first", "cocks %s's beer", "eats %s's hat", "sets %s up the bomb"]
+ response = random.choice(messages)
+
+ if not already:
+ phenny.do(response % otherweird)
+
+fight.commands = ['fight']
+fight.priority = 'low'
+fight.example = '.fight ChanServ'
+
+def hug(phenny, input):
+ """Have begiak hug someone for you."""
+ global otherweird
+ whouser = input.groups()[1]
+ if whouser:
+ otherweird = whouser
+
+ if whouser.lower()==phenny.nick.lower():
+ phenny.do("tries but fails.")
+ else:
+ phenny.do("hugs %s" % otherweird)
+
+hug.commands = ['hug']
+hug.priority = 'low'
+hug.example = '.hug ChanServ'
+if __name__ == '__main__':
+ print(__doc__.strip())
From 11379eee70eb7c0258995a825960d998dded61c9 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 21:56:49 -0500
Subject: [PATCH 19/27] Added .wikicount counts the number of articles in the
specified language's wiki
---
modules/wiki_count.py | 119 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 119 insertions(+)
create mode 100644 modules/wiki_count.py
diff --git a/modules/wiki_count.py b/modules/wiki_count.py
new file mode 100644
index 000000000..93ad14c72
--- /dev/null
+++ b/modules/wiki_count.py
@@ -0,0 +1,119 @@
+"""
+.wikicount - return the number of wikipedia articles in lg's wiki
+author: mattr555
+"""
+
+from lxml import html
+import web
+
+def scrape_wiki_list():
+ data = {}
+ url = 'http://meta.wikimedia.org/wiki/List_of_Wikipedias'
+ resp = web.get(url)
+ h = html.document_fromstring(resp)
+ for e in h.find_class('sortable'):
+ for row in e.findall('tr')[1:]:
+ name = row.findall('td')[1].find('a').text
+ code = row.findall('td')[3].find('a').text
+ count = int(row.findall('td')[4].find('a/b').text.replace(',', ''))
+ data[code] = (name, count)
+ return data
+
+def scrape_incubator_list():
+ data = {}
+ url = 'http://incubator.wikimedia.org/wiki/Template:Tests/wp'
+ resp = web.get(url)
+ h = html.document_fromstring(resp)
+ for row in h.find_class('wikitable')[0].findall('tr')[2:]:
+ raw_name = row.findall('td')[0].find('a/b').text
+ name = ' '.join(raw_name.split(' ')[1:])
+ code = row.findall('td')[1].find('a').text.split(' ')[0][3:]
+ data[code] = (name, None)
+ return data
+
+def scrape_iso_3to1(d):
+ mapping = {}
+ resp = web.get('http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes')
+ h = html.document_fromstring(resp)
+ table = h.find_class('wikitable')[0]
+ for row in table.findall('tr')[1:]:
+ three_code = row.findall('td')[7].text.split(' ')[0]
+ one_code = row.findall('td')[4].text
+ if one_code in d:
+ mapping[three_code] = one_code
+ return mapping
+
+def wiki_response(info, lg):
+ if info[1] is not None:
+ url = 'http://{}.wikipedia.org/'.format(lg)
+ response = 'The {} ({}) Wikipedia has {:,} articles. {}'.format(
+ info[0], lg, info[1], url)
+ else:
+ url = 'http://incubator.wikimedia.org/wiki/Wp/' + lg
+ resp = web.get('http://incubator.wikimedia.org/wiki/Template:Wp/{}/NUMBEROFARTICLES'.format(lg))
+ num_articles = int(html.document_fromstring(resp).get_element_by_id('mw-content-text').find('p/a').text.replace(',', ''))
+ response = 'The {} ({}) Wikipedia is incubated and has {:,} articles. {}'.format(
+ info[0], lg, num_articles, url)
+ return response
+
+
+def wikicount(phenny, raw):
+ lg = raw.group(2).lower()
+ if lg == "update":
+ return
+ elif lg in phenny.wiki_data:
+ info = phenny.wiki_data[lg]
+ response = wiki_response(info, lg)
+ elif lg in phenny.wiki_iso_3_map:
+ real_lg = phenny.wiki_iso_3_map[lg]
+ info = phenny.wiki_data[real_lg]
+ response = wiki_response(info, real_lg)
+ else:
+ possible = []
+ for k, v in phenny.wiki_data.items():
+ if lg in v[0].lower():
+ possible.append((k,v))
+ if len(possible) == 1:
+ response = wiki_response(possible[0][1], possible[0][0])
+ elif len(possible) == 0:
+ response = "That wiki code wasn't found."
+ else:
+ did_you_mean = []
+ for i in possible:
+ if i[1][1] is not None:
+ did_you_mean.append('{} ({}, {:,} articles)'.format(i[1][0], i[0], i[1][1]))
+ else:
+ did_you_mean.append('{} ({}, incubated)'.format(i[1][0], i[0]))
+ response = "Did you mean: " + ', '.join(did_you_mean)
+
+ phenny.say(response)
+
+wikicount.name = 'wikicount'
+wikicount.commands = ['wikicount']
+wikicount.example = '.wikicount en'
+wikicount.priority = 'low'
+
+def update_article_count(phenny, raw=None):
+ if raw is None or raw.admin:
+ phenny.wiki_data = scrape_incubator_list()
+ phenny.wiki_data.update(scrape_wiki_list())
+ if raw:
+ phenny.say('Wikipedia article counts successfully updated.')
+ else:
+ phenny.say('Only admins can execute that command!')
+
+update_article_count.name = 'wikicount_update'
+update_article_count.commands = ['wikicount update']
+
+def setup(phenny):
+ update_article_count(phenny)
+ phenny.wiki_iso_3_map = scrape_iso_3to1(phenny.wiki_data)
+ phenny.wiki_iso_3_map.update({
+ 'sgs': 'bat-smg',
+ 'roa': 'nrm',
+ 'vro': 'fiu-vro',
+ 'yue': 'zh-yue',
+ 'nan': 'zh-min-nan',
+ 'lzh': 'zh-classical',
+ 'be-tarask': 'be-x-old'
+ })
From 40ed3de4835af184335201e62db996d251d38913 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 21:59:46 -0500
Subject: [PATCH 20/27] Added language functionality to .wik .wik.
if no language is specified, english is defaulted
---
modules/wikipedia.py | 110 +++++++++++++++++++++++++++++++++++++------
1 file changed, 96 insertions(+), 14 deletions(-)
diff --git a/modules/wikipedia.py b/modules/wikipedia.py
index 8dbe6f48b..deb5d1e9e 100644
--- a/modules/wikipedia.py
+++ b/modules/wikipedia.py
@@ -7,27 +7,94 @@
http://inamidst.com/phenny/
"""
-import re
+import re, urllib.parse, wiki
+from lxml import etree
+import lxml.html
+import lxml.html.clean
import web
-import wiki
-wikiapi = 'https://en.wikipedia.org/w/api.php?action=query&list=search&srsearch={0}&limit=1&prop=snippet&format=json'
-wikiuri = 'https://en.wikipedia.org/wiki/{0}'
-wikisearch = 'https://en.wikipedia.org/wiki/Special:Search?' \
+wikiapi = 'https://%s.wikipedia.org/w/api.php?action=query&list=search&srsearch={0}&limit=1&prop=snippet&format=json'
+wikiuri = 'https://%s.wikipedia.org/wiki/{0}'
+wikisearch = 'https://%s.wikipedia.org/wiki/Special:Search?' \
+ 'search={0}&fulltext=Search'
-def wik(phenny, input):
- """.wik - Look up something on Wikipedia."""
+langs = ['ar', 'bg', 'ca', 'cs', 'da', 'de', 'en', 'es', 'eo', 'eu', 'fa', 'fr', 'ko', 'hi', 'hr', 'id', 'it', 'he', 'lt', 'hu', 'ms', 'nl', 'ja', 'no', 'pl', 'pt', 'kk', 'ro', 'ru', 'sk', 'sl', 'sr', 'fi', 'sv', 'tr', 'uk', 'vi', 'vo', 'war', 'zh']
+
+def format_term(term):
+ term = web.unquote(term)
+ term = web.quote(term)
+ term = term[0].upper() + term[1:]
+ term = term.replace(' ', '_')
+ return term
+
+def format_term_display(term):
+ term = web.unquote(term)
+ term = term[0].upper() + term[1:]
+ term = term.replace(' ', '_')
+ return term
+
+def format_subsection(section):
+ section = section.replace(' ', '_')
+ section = urllib.parse.quote_plus(section)
+ section = section.replace('%', '.')
+ section = section.replace(".3A", ":")
+ return section
+
+def parse_wiki_page(url, term, section = None):
+ try:
+ web_url = web.quote(url).replace("%3A", ":", 1)
+ html = str(web.get(web_url))
+ except:
+ return "A wiki page does not exist for that term."
+ page = lxml.html.fromstring(html)
+ if section is not None:
+ text = page.find(".//span[@id='%s']" % section)
+
+ if text is None:
+ return "That subsection does not exist."
+ text = text.getparent().getnext()
+
+ #a div tag may come before the text
+ while text.tag is not None and text.tag != "p":
+ text = text.getnext()
+ url += "#" + format_term_display(section)
+ else:
+ #Get first 3 paragraphs and find the one most
+ #likely to actually contain text.
+
+ texts = page.findall('.//p')[:4]
+ if len(texts) == 0:
+ return "Unable to find content. Search may be too broad."
+ texts.sort(key=len, reverse=True)
+ text = texts[0]
+
+ sentences = text.text_content().split(". ")
+ sentence = '"' + sentences[0] + '"'
+
+ maxlength = 440 - len(' - ' + url)
+ if len(sentence.encode('utf-8')) > maxlength:
+ sentence = sentence[:maxlength]
+ words = sentence[:-5].split(' ')
+ words.pop()
+ sentence = ' '.join(words) + ' [...]'
+
+ return sentence + ' - ' + url
+
+def wikipedia(phenny, origterm, lang):
+ origterm = origterm.strip()
+ lang = lang.strip()
- origterm = input.groups()[1]
if not origterm:
return phenny.say('Perhaps you meant ".wik Zen"?')
+
+ section = None
- term = web.unquote(origterm)
- term = term[0].upper() + term[1:]
- term = term.replace(' ', '_')
+ if "#" in origterm:
+ origterm, section = origterm.split("#")[:2]
+ section = format_subsection(section)
+ term = format_term(origterm)
- w = wiki.Wiki(wikiapi, wikiuri, wikisearch)
+ w = wiki.Wiki(wikiapi % lang, wikiuri % lang, wikisearch % lang)
try:
result = w.search(term)
@@ -36,11 +103,26 @@ def wik(phenny, input):
return phenny.say(error)
if result is not None:
- phenny.say(result)
+ #Disregarding [0], the snippet
+ url = result.split("|")[-1]
+ phenny.say(parse_wiki_page(url, term, section))
+
else:
phenny.say('Can\'t find anything in Wikipedia for "{0}".'.format(origterm))
-wik.commands = ['wik']
+def wik(phenny, input):
+ """Search for something on Wikipedia"""
+ origterm = input.group(3)
+ lang = "en"
+
+ m = re.match(r'\.(wik|wiki|wikipedia)\.([a-z]{2,3})(?: +(.*))', str(input))
+ if m:
+ lang = m.group(2)
+ origterm = m.group(3)
+
+ wikipedia(phenny, origterm, lang)
+wik.rule = r'\.(wik|wiki|wikipedia)(\.[a-z]{2,3})?\s(?:(.*))'
+#wik.commands = ['wik', 'wiki', 'wikipedia']
wik.priority = 'high'
if __name__ == '__main__':
From e37d6e27aa055f3ee5d11eb33dd83b24bbb6f674 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 22:05:45 -0500
Subject: [PATCH 21/27] Added .ety - etymology lookup on Wiktionary
---
modules/wiktionary.py | 93 +++++++++++++++++++++++++++++++++++--------
1 file changed, 77 insertions(+), 16 deletions(-)
diff --git a/modules/wiktionary.py b/modules/wiktionary.py
index 8770d1df4..31a264226 100644
--- a/modules/wiktionary.py
+++ b/modules/wiktionary.py
@@ -10,8 +10,9 @@
import re
import web
import json
+import re, urllib.request, urllib.parse, urllib.error
-uri = 'http://en.wiktionary.org/w/index.php?title=%s&printable=yes'
+uri = 'http://en.wiktionary.org/wiki/{0}?printable=yes'
wikiapi = 'http://en.wiktionary.org/w/api.php?action=query&titles={0}&prop=revisions&rvprop=content&format=json'
#r_tag = re.compile(r'<[^>]+>')
r_ul = re.compile(r'(?ims)')
@@ -22,6 +23,7 @@
r_context = re.compile(r'{{context\|(.+?)}}')
r_template1 = re.compile(r'{{.+?\|(.+?)}}')
r_template2 = re.compile(r'{{(.+?)}}')
+r_sqrbracket = re.compile(r'\[.+?\]')
def text(html):
text = r_li.sub('', html).strip()
@@ -31,9 +33,11 @@ def text(html):
text = r_context.sub(r'\1:', text)
text = r_template1.sub(r'\1:', text)
text = r_template2.sub(r'\1:', text)
+ text = text.replace("en|", '')
+ text = r_sqrbracket.sub('', text)
return text
-def wiktionary(word):
+def wiktionary(phenny, word):
bytes = web.get(wikiapi.format(web.quote(word)))
pages = json.loads(bytes)
pages = pages['query']['pages']
@@ -50,31 +54,73 @@ def wiktionary(word):
for line in result.splitlines():
if line == '===Etymology===':
mode = 'etymology'
- elif 'Noun' in line:
+ elif '=Noun=' in line:
mode = 'noun'
- elif 'Verb' in line:
+ elif '=Verb=' in line:
mode = 'verb'
- elif 'Adjective' in line:
+ elif '=Adjective=' in line:
mode = 'adjective'
- elif 'Adverb' in line:
+ elif '=Adverb=' in line:
mode = 'adverb'
- elif 'Interjection' in line:
+ elif '=Interjection=' in line:
mode = 'interjection'
- elif 'Particle' in line:
+ elif '=Particle=' in line:
mode = 'particle'
- elif 'Preposition' in line:
+ elif '=Preposition=' in line:
mode = 'preposition'
- elif len(line) == 0:
- mode = None
-
elif mode == 'etymology':
etymology = text(line)
- elif mode is not None and '#' in line:
+
+ if mode is not None and "#" in line and "#:" not in line:
definitions.setdefault(mode, []).append(text(line))
if '====Synonyms====' in line:
break
+
return etymology, definitions
+
+def get_between(strSource, strStart, strEnd): #get first string between 2 other strings
+ try:
+ parse = strSource.split(strStart, 2)[1]
+ parse = parse[:parse.find(strEnd)]
+ except:
+ parse = None
+ return parse
+
+def get_between_all(strSource, strStart, strEnd): #get all the strings between the 2 strings
+ list = []
+ start = 0
+ word = get_between(strSource, strStart, strEnd)
+ while (word != None):
+ list.append(word)
+ start = strSource.find("".join((strStart, word, strEnd)))
+ strSource = strSource[start+len("".join((strStart, word, strEnd))):]
+ word = get_between(strSource, strStart, strEnd)
+ return list
+
+def etymology(phenny, word):
+ ety_value = None
+ try:
+ opener = urllib.request.build_opener()
+ opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17')]
+ bytes = opener.open(uri.format(web.quote(word)))
+ html = bytes.read().decode('utf-8')
+ ety_value = get_between_all(html, '">Etymology', '
')
+ ety_value = " ".join(ety_value)
+ ety_value = re.compile(r'<[^<]*?/?>').sub('', ety_value)
+ ety_value = ety_value.replace(' ', '')
+ ety_value = ety_value.replace('From ', '← ')
+ ety_value = ety_value.replace(', from', ' ←')
+ ety_value = ety_value.replace('from ', '← ')
+ ety_value = word + ": " + ety_value.replace(".", '') + "."
+ ety_value = r_sqrbracket.sub('', ety_value)
+
+ if len(ety_value) > 300:
+ ety_value = ety_value[:295] + " [...]"
+ except:
+ ety_value = None
+ return ety_value
+
parts = ('preposition', 'particle', 'noun', 'verb',
'adjective', 'adverb', 'interjection')
@@ -90,12 +136,11 @@ def format(word, definitions, number=2):
return result.strip(' .,')
def w(phenny, input):
- """.w - Get the definition of a word from wiktionary."""
-
+ """Look up a word on Wiktionary."""
if not input.group(2):
return phenny.reply("Nothing to define.")
word = input.group(2)
- etymology, definitions = wiktionary(word)
+ etymology, definitions = wiktionary(phenny, word)
if not definitions:
phenny.say("Couldn't get any definitions for %s." % word)
return
@@ -105,6 +150,8 @@ def w(phenny, input):
result = format(word, definitions, 3)
if len(result) < 150:
result = format(word, definitions, 5)
+
+ result = result.replace('|_|', ' ').replace('|', ' ')
if len(result) > 300:
result = result[:295] + '[...]'
@@ -112,5 +159,19 @@ def w(phenny, input):
w.commands = ['w']
w.example = '.w bailiwick'
+def ety(phenny, input):
+ """Find the etymology of a word."""
+ if not input.group(2):
+ return phenny.reply("Nothing to define.")
+ word = input.group(2)
+ ety_val = ''
+ ety_val = etymology(phenny, word)
+ if not ety_val or ety_val == word + ' .':
+ phenny.say("Couldn't get any etymology for %s." % word)
+ return
+ phenny.say(text(ety_val))
+ety.commands = ['ety']
+ety.example = '.ety bailiwick'
+
if __name__ == '__main__':
print(__doc__.strip())
From e26e877cdeea9837e9f3210e2602352881dc51e8 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 22:11:01 -0500
Subject: [PATCH 22/27] Added .whereis - sees if a user left an away message with
.away
---
modules/away.py | 52 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
create mode 100644 modules/away.py
diff --git a/modules/away.py b/modules/away.py
new file mode 100644
index 000000000..ec3fa584e
--- /dev/null
+++ b/modules/away.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+away.py - Phenny's record of who's away/present module
+"""
+
+import os, re, time, random
+import web
+
+statuses = {"alan": "boom bitchezzzz"}
+
+def whereis(phenny, input):
+ """Tells you nick's current status."""
+ whereis_nick = input.split(" ")[1]
+ print(input + " --> " + whereis_nick)
+ if (whereis_nick in list(statuses.keys())):
+ phenny.reply(whereis_nick + " said: " + statuses[whereis_nick])
+ else:
+ phenny.reply("Sorry, " + whereis_nick + " seems to be AWOL...")
+whereis.commands = ["whereis"]
+whereis.priority = 'low'
+whereis.example = '.whereis sushain'
+whereis.thread = False
+
+def away(phenny, input):
+ """Set your status to being away."""
+ nick = input.nick
+ if input.count(" ") == 0:
+ statuses[nick] = "I'm away right now"
+ else:
+ message = str(" ".join(input.split(" ")[1:]))
+ statuses[nick] = message
+away.commands = ['away']
+away.example = '.away eating pie'
+away.priority = 'low'
+away.thread = False
+
+def back(phenny, input):
+ """Set your status to being available."""
+ nick = input.nick
+ if input.count(" ") == 0:
+ statuses[nick] = "I'm around at the minute"
+ else:
+ message = str(" ".join(input.split(" ")[1:]))
+ statuses[nick] = message
+back.commands = ['back']
+back.example = '.back'
+back.priority = 'low'
+back.thread = False
+
+
+if __name__ == '__main__':
+ print(__doc__.strip())
From e4a41c3b453af73fd7cee19438896eed77db7326 Mon Sep 17 00:00:00 2001
From: Matthew Ramina
Date: Thu, 28 Nov 2013 22:23:32 -0500
Subject: [PATCH 23/27] Added minor documentation
---
modules/8ball.py | 2 ++
modules/botfun.py | 2 ++
modules/calc.py | 2 +-
modules/imdb.py | 1 +
modules/remind.py | 3 +++
modules/search.py | 6 +++++-
modules/urbandict.py | 1 +
7 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/modules/8ball.py b/modules/8ball.py
index 9c14cb8d9..bb9e1a575 100644
--- a/modules/8ball.py
+++ b/modules/8ball.py
@@ -46,6 +46,8 @@ def eightball(phenny, input):
quote = random.choice(quotes)
phenny.reply(quote)
eightball.commands = ['8ball']
+eightball.name = '8ball'
+eightball.example = '.8ball is pie amazing?'
if __name__ == '__main__':
print(__doc__.strip())
diff --git a/modules/botfun.py b/modules/botfun.py
index a06c1f7ce..10ab18fb7 100644
--- a/modules/botfun.py
+++ b/modules/botfun.py
@@ -17,6 +17,7 @@ def botfight(phenny, input):
phenny.do(response % otherbot)
botfight.commands = ['botfight']
botfight.priority = 'low'
+botfight.example = '.botfight'
def bothug(phenny, input):
""".bothug - Hug the other bot in the channel."""
@@ -24,6 +25,7 @@ def bothug(phenny, input):
phenny.do("hugs %s" % otherbot)
bothug.commands = ['bothug']
bothug.priority = 'low'
+bothug.example = '.bothug'
if __name__ == '__main__':
print(__doc__.strip())
diff --git a/modules/calc.py b/modules/calc.py
index 28e4fd600..ffbc32fa2 100644
--- a/modules/calc.py
+++ b/modules/calc.py
@@ -78,7 +78,7 @@ def wa(phenny, input):
phenny.say(answer)
wa.commands = ['wa']
-
+wa.example = '.wa answer to life'
if __name__ == '__main__':
print(__doc__.strip())
diff --git a/modules/imdb.py b/modules/imdb.py
index e49dfa753..108a286f6 100644
--- a/modules/imdb.py
+++ b/modules/imdb.py
@@ -38,3 +38,4 @@ def imdb(phenny, input):
except:
phenny.reply("No results found for '%s'." % query)
imdb.commands = ['imdb']
+imdb.example = '.imdb Prometheus'
diff --git a/modules/remind.py b/modules/remind.py
index 56df81db5..df39ec640 100644
--- a/modules/remind.py
+++ b/modules/remind.py
@@ -103,6 +103,7 @@ def monitor(phenny):
r_command = re.compile(p_command)
def remind(phenny, input):
+ """Set a reminder"""
m = r_command.match(input.bytes)
if not m:
return phenny.reply("Sorry, didn't understand the input.")
@@ -131,6 +132,8 @@ def remind(phenny, input):
w += time.strftime(' at %H:%MZ', time.gmtime(t))
phenny.reply('Okay, will remind%s' % w)
else: phenny.reply('Okay, will remind in %s secs' % duration)
+remind.name = 'in'
+remind.example = '.in 15 minutes do work'
remind.commands = ['in']
r_time = re.compile(r'^([0-9]{2}[:.][0-9]{2})')
diff --git a/modules/search.py b/modules/search.py
index 7bec48d0e..d0ddc8909 100644
--- a/modules/search.py
+++ b/modules/search.py
@@ -75,6 +75,7 @@ def gc(phenny, input):
)
def gcs(phenny, input):
+ """Compare the number of Google results for the specified paramters."""
if not input.group(2):
return phenny.reply("Nothing to compare.")
queries = r_query.findall(input.group(2))
@@ -93,6 +94,7 @@ def gcs(phenny, input):
reply = ', '.join('%s (%s)' % (t, formatnumber(n)) for (t, n) in results)
phenny.say(reply)
gcs.commands = ['gcs', 'comp']
+gcs.example = '.gcs Ronaldo Messi'
r_bing = re.compile(r'