Merge lp:~stefanor/ibid/exchange-336443 into lp:~ibid-core/ibid/old-trunk-pack-0.92

Proposed by Stefano Rivera
Status: Merged
Approved by: Michael Gorven
Approved revision: 575
Merged at revision: 566
Proposed branch: lp:~stefanor/ibid/exchange-336443
Merge into: lp:~ibid-core/ibid/old-trunk-pack-0.92
Diff against target: None lines
To merge this branch: bzr merge lp:~stefanor/ibid/exchange-336443
Reviewer             Review Type    Date Requested    Status
Michael Gorven                                        Approve
Jonathan Hitchcock                                    Approve
Review via email: mp+4267@code.launchpad.net
Revision history for this message
Stefano Rivera (stefanor) wrote :

Added a dependency. This should probably be tested on hardy.

lp:~stefanor/ibid/exchange-336443 updated
564. By Stefano Rivera

Move get_soup to utils

565. By Stefano Rivera

Merge from trunk

Revision history for this message
Jonathan Hitchcock (vhata) :
review: Approve
Revision history for this message
Michael Gorven (mgorven) wrote :

Looks good. If this works on hardy I'm happy to merge.

lp:~stefanor/ibid/exchange-336443 updated
566. By Stefano Rivera

Further INSTALL work

567. By Stefano Rivera

Use elementtree for XE.com

568. By Stefano Rivera

While python-html5lib suggests celementtree, we should probably ensure it gets installed

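For reference, a minimal sketch of the treebuilder selection these two revisions concern, assuming the html5lib 0.x API of the day (the merged diff below uses the "beautifulsoup" treebuilder; the cElementTree backend is a faster alternative):

    # Hypothetical: parse into a cElementTree-backed tree instead of BeautifulSoup
    import xml.etree.cElementTree as cElementTree
    from html5lib import HTMLParser, treebuilders

    parser = HTMLParser(tree=treebuilders.getTreeBuilder("etree", cElementTree))
    tree = parser.parse('<p>1 ZAR = 0.11 USD</p>')
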
569. By Stefano Rivera

Wrong DNS library

570. By Stefano Rivera

Other niceties

571. By Stefano Rivera

Yet more niceties

572. By Stefano Rivera

Use pure BS, if BS is requested

573. By Stefano Rivera

Don't stomp all over the other 'convert' functions

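For reference, the exchange/convert match pattern as it appears in the diff below, and what it captures; a quick standalone check:

    # The @match regex from ibid/plugins/lookup.py (see the diff below);
    # groups are (amount, from-currency, to-currency)
    import re
    pattern = re.compile(r'^(?:exchange|convert)\s+([0-9.]+)\s+(\S+)\s+(?:for|to|into)\s+(\S+)$')
    print pattern.match('exchange 100 zar for usd').groups()  # ('100', 'zar', 'usd')
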
574. By Stefano Rivera

Unicode fix

575. By Stefano Rivera

Merge from trunk

Revision history for this message
Michael Gorven (mgorven) :
review: Approve

Preview Diff

=== modified file 'INSTALL'
--- INSTALL 2009-02-24 16:03:17 +0000
+++ INSTALL 2009-03-07 13:40:14 +0000
@@ -1,32 +1,26 @@
 Installation Instructions for Debian/Ubuntu Systems:
 
 Add the Ibid PPA to your APT sources:
-# echo deb http://ppa.launchpad.net/ibid-dev/ppa/ubuntu `lsb_release -c | cut -f2` main > /etc/apt/sources.list.d/ibid
+# echo deb http://ppa.launchpad.net/ibid-core/ppa/ubuntu `lsb_release -c | cut -f2` main > /etc/apt/sources.list.d/ibid.list
+# apt-key adv --recv-keys --keyserver keyserver.ubuntu.com c2d0f8531bba37930c0d85e3d59f9e8dfd1c44ba
+# aptitude update
 
 Install required modules:
-# apt-get install python-virtualenv python-soappy python-twisted \
+# aptitude install python-virtualenv python-soappy python-twisted \
     python-configobj python-sqllite2 python-feedparser \
-    python-httplib2 python-beautifulsoup python-dictclient \
+    python-httplib2 python-html5lib python-dictclient \
     python-imdbpy python-dns python-simplejson \
-    python-jinja pysilc python-pinder
+    python-jinja python-pysilc python-pinder
 
 Switch to the user ibid will be running as.
-Set up a virtual Python environment
-(this will create a directory called ibid):
-$ virtualenv ibid
-$ source ibid/bin/activate
-
-Change directory to where you extracted Ibid.
-
-Set up any dependancies:
-$ ./setup.py install
+$ export PYTHONPATH=.
 
 Set up your bot:
-$ ibid-setup
+$ scripts/ibid-setup
 $ mkdir logs
 
 Run your bot:
-$ ibid
+$ scripts/ibid
 
 Other things we recommend:
 * Install a dictd on localhost for the dict plugin (debian/ubuntu package "dictd")
 
=== modified file 'ibid/plugins/lookup.py'
--- ibid/plugins/lookup.py 2009-03-02 09:21:35 +0000
+++ ibid/plugins/lookup.py 2009-03-07 13:40:04 +0000
@@ -3,10 +3,11 @@
 from time import time
 from datetime import datetime
 from simplejson import loads
+import cgi
 import re
 
 import feedparser
-from BeautifulSoup import BeautifulSoup
+from html5lib import HTMLParser, treebuilders
 
 from ibid.plugins import Processor, match, handler
 from ibid.config import Option
@@ -14,6 +15,25 @@
 
 help = {}
 
+def get_soup(url, data=None, headers={}):
+    "Request a URL and create a BeautifulSoup parse tree from it"
+
+    req = Request(url, data, headers)
+    f = urlopen(req)
+    data = f.read()
+    f.close()
+
+    encoding = None
+    contentType = f.headers.get('content-type')
+    if contentType:
+        (mediaType, params) = cgi.parse_header(contentType)
+        encoding = params.get('charset')
+
+    treebuilder = treebuilders.getTreeBuilder("beautifulsoup")
+    parser = HTMLParser(tree=treebuilder)
+
+    return parser.parse(data, encoding = encoding)
+
 help['bash'] = u'Retrieve quotes from bash.org.'
 class Bash(Processor):
     u"bash[.org] (random|<number>)"
@@ -22,15 +42,13 @@
 
     @match(r'^bash(?:\.org)?\s+(random|\d+)$')
     def bash(self, event, quote):
-        f = urlopen('http://bash.org/?%s' % quote.lower())
-        soup = BeautifulSoup(f.read(), convertEntities=BeautifulSoup.HTML_ENTITIES)
-        f.close()
+        soup = get_soup('http://bash.org/?%s' % quote.lower())
 
         if quote.lower() == "random":
-            number = u"".join(soup.find('p', attrs={'class': 'quote'}).find('b').contents)
+            number = u"".join(soup.find('p', 'quote').find('b').contents)
             event.addresponse(u"%s:" % number)
 
-        quote = soup.find('p', attrs={'class': 'qt'})
+        quote = soup.find('p', 'qt')
         if not quote:
             event.addresponse(u"There's no such quote, but if you keep talking like that maybe there will be.")
         else:
@@ -100,12 +118,14 @@
     feature = "fml"
 
     def remote_get(self, id):
-        f = urlopen('http://www.fmylife.com/' + str(id))
-        soup = BeautifulSoup(f.read())
-        f.close()
+        soup = get_soup('http://www.fmylife.com/' + str(id))
 
         quote = soup.find('div', id='wrapper').div.p
-        return quote and u'"%s"' % (quote.contents[0],) or None
+        if quote:
+            url = u"http://www.fmylife.com" + quote.find('a', 'fmllink')['href']
+            quote = u"".join(tag.contents[0] for tag in quote.findAll(True))
+
+        return u'%s: "%s"' % (url, quote)
 
     @match(r'^(?:fml\s+|http://www\.fmylife\.com/\S+/)(\d+|random)$')
     def fml(self, event, id):
@@ -170,14 +190,12 @@
     currencies = []
 
     def _load_currencies(self):
-        request = Request('http://www.xe.com/iso4217.php', '', self.headers)
-        f = urlopen(request)
-        soup = BeautifulSoup(f.read())
-        f.close()
+        soup = get_soup('http://www.xe.com/iso4217.php', headers=self.headers)
 
         self.currencies = []
-        for tr in soup.find('table', attrs={'class': 'tbl_main'}).table.findAll('tr'):
+        for tr in soup.find('table', 'tbl_main').table.findAll('tr'):
             code, place = tr.findAll('td')
+            code = code.contents[0]
             place = ''.join(place.findAll(text=True))
             place, name = place.find(',') != -1 and place.split(',', 1) or place.split(' ', 1)
             self.currencies.append((code.string, place.strip(), name.strip()))
@@ -185,12 +203,9 @@
     @match(r'^(?:exchange|convert)\s+([0-9.]+)\s+(\S+)\s+(?:for|to|into)\s+(\S+)$')
     def exchange(self, event, amount, frm, to):
         data = {'Amount': amount, 'From': frm, 'To': to}
-        request = Request('http://www.xe.com/ucc/convert.cgi', urlencode(data), self.headers)
-        f = urlopen(request)
-        soup = BeautifulSoup(f.read())
-        f.close()
+        soup = get_soup('http://www.xe.com/ucc/convert.cgi', urlencode(data), self.headers)
 
-        event.addresponse(soup.findAll('span', attrs={'class': 'XEsmall'})[1].contents[0])
+        event.addresponse(u" ".join(tag.contents[0] for tag in soup.findAll('h2', 'XE')))
 
     @match(r'^(?:currency|currencies)\s+for\s+(?:the\s+)?(.+)$')
     def currency(self, event, place):
@@ -238,9 +253,7 @@
         if place.lower() in self.places:
             place = self.places[place.lower()]
 
-        f = urlopen('http://m.wund.com/cgi-bin/findweather/getForecast?brand=mobile_metric&query=' + quote(place))
-        soup = BeautifulSoup(f.read(), convertEntities=BeautifulSoup.HTML_ENTITIES)
-        f.close()
+        soup = get_soup('http://m.wund.com/cgi-bin/findweather/getForecast?brand=mobile_metric&query=' + quote(place))
 
         if soup.body.center and soup.body.center.b.string == 'Search not found:':
             raise Weather.WeatherException(u'City not found')
@@ -257,7 +270,7 @@
         soup = self._get_page(place)
         tds = soup.table.table.findAll('td')
 
-        values = {'place': tds[0].findAll('b')[1].string, 'time': tds[0].findAll('b')[0].string}
+        values = {'place': tds[0].findAll('b')[1].contents[0], 'time': tds[0].findAll('b')[0].contents[0]}
         for index, td in enumerate(tds[2::2]):
             values[self.labels[index]] = self._text(td)
 
@@ -268,7 +281,7 @@
         forecasts = []
 
         for td in soup.findAll('table')[2].findAll('td', align='left'):
-            day = td.b.string
+            day = td.b.contents[0]
             forecast = td.contents[2]
             forecasts.append('%s: %s' % (day, self._text(forecast)))
 
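
The common thread in the lookup.py changes is the new get_soup() helper: one call replaces the repeated urlopen/BeautifulSoup/close boilerplate and routes pages through html5lib's more forgiving parser while keeping BeautifulSoup's navigation API. A usage sketch, assuming the helper as defined in the diff is importable (revision 564 moves it into the utils module; the preview diff here still shows it in lookup.py):

    # Hypothetical import path; see the diff above for the definition.
    from ibid.plugins.lookup import get_soup

    soup = get_soup('http://bash.org/?random')
    quote = soup.find('p', 'qt')  # class-name shorthand, as used in the diff
    if quote:
        print u''.join(quote.findAll(text=True))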
