Merge lp:~wgrant/launchpad/delete-more-stuff into lp:launchpad
- delete-more-stuff
- Merge into devel
Status: | Merged |
---|---|
Approved by: | Gavin Panella |
Approved revision: | no longer in the source branch. |
Merged at revision: | not available |
Proposed branch: | lp:~wgrant/launchpad/delete-more-stuff |
Merge into: | lp:launchpad |
Diff against target: |
626 lines (+0/-576) 8 files modified
lib/canonical/Makefile (+0/-5) lib/canonical/doap/__init__.py (+0/-8) lib/canonical/doap/forage.py (+0/-283) lib/canonical/librarian/Makefile (+0/-18) lib/lp/archivepublisher/library.py (+0/-145) lib/lp/archivepublisher/tests/test_librarianwrapper.py (+0/-80) lib/lp/archivepublisher/tests/util.py (+0/-28) lib/psycopg.py (+0/-9) |
To merge this branch: | bzr merge lp:~wgrant/launchpad/delete-more-stuff |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Gavin Panella (community) | Approve | ||
Review via email: mp+24280@code.launchpad.net |
Commit message
Remove more unused bits and pieces.
Description of the change
This removes a few more unused bits and pieces from around the tree:
- Two broken Makefiles: lib/canonical/Makefile and lib/canonical/librarian/Makefile.
- lib/canonical/doap is long-obsolete, with its only remaining non-empty file being unused and unmodified since 2004.
- lp.archivepublisher.library (the Librarian wrapper) and its tests are unused; an XXX comment from 2006 already questioned whether the whole file and its tests could be removed.
- lib/psycopg.py raises an exception on import, and appears to date from the psycopg2 migration days. Nothing has imported it in a long time, so it's just clutter now.
- lib/canonical/not-used (including its hctapi subdirectory) is an obsolete, empty directory tree.
Gavin Panella (allenap) : | # |
Preview Diff
1 | === removed file 'lib/canonical/Makefile' | |||
2 | --- lib/canonical/Makefile 2005-10-31 18:29:12 +0000 | |||
3 | +++ lib/canonical/Makefile 1970-01-01 00:00:00 +0000 | |||
4 | @@ -1,5 +0,0 @@ | |||
5 | 1 | check: | ||
6 | 2 | # python ../../test_on_merge.py canonical.lp | ||
7 | 3 | |||
8 | 4 | .PHONY: check | ||
9 | 5 | |||
10 | 6 | 0 | ||
11 | === removed directory 'lib/canonical/doap' | |||
12 | === removed file 'lib/canonical/doap/__init__.py' | |||
13 | --- lib/canonical/doap/__init__.py 2009-06-25 05:39:50 +0000 | |||
14 | +++ lib/canonical/doap/__init__.py 1970-01-01 00:00:00 +0000 | |||
15 | @@ -1,8 +0,0 @@ | |||
16 | 1 | # Copyright 2009 Canonical Ltd. This software is licensed under the | ||
17 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
18 | 3 | |||
19 | 4 | """This is the canonical.doap Python package. | ||
20 | 5 | |||
21 | 6 | The DOAP (Description Of A Project) subsystem of Launchpad tracks projects, | ||
22 | 7 | products, product releases and series' of releases. | ||
23 | 8 | """ | ||
24 | 9 | 0 | ||
25 | === removed file 'lib/canonical/doap/forage.py' | |||
26 | --- lib/canonical/doap/forage.py 2009-06-25 05:30:52 +0000 | |||
27 | +++ lib/canonical/doap/forage.py 1970-01-01 00:00:00 +0000 | |||
28 | @@ -1,283 +0,0 @@ | |||
29 | 1 | # Copyright 2009 Canonical Ltd. This software is licensed under the | ||
30 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
31 | 3 | |||
32 | 4 | # Retrieve project details from sourceforge / freshmeat. | ||
33 | 5 | |||
34 | 6 | import urllib2 | ||
35 | 7 | import re | ||
36 | 8 | import string | ||
37 | 9 | |||
38 | 10 | # Constants | ||
39 | 11 | Error = 'sourceforge.py error' | ||
40 | 12 | |||
41 | 13 | def getProjectSpec(project, repository='sf'): | ||
42 | 14 | page = ProjectPage(project, repository) | ||
43 | 15 | #page.makeDict() | ||
44 | 16 | return page.getDict() | ||
45 | 17 | |||
46 | 18 | def makeURL(project, repository='sf'): | ||
47 | 19 | if repository=='sf': | ||
48 | 20 | url = 'http://sourceforge.net/projects/'+project+'/' | ||
49 | 21 | elif repository=='fm': | ||
50 | 22 | url = 'http://freshmeat.net/projects/'+project+'/' | ||
51 | 23 | else: raise Error, 'invalid repository: '+repository | ||
52 | 24 | return url | ||
53 | 25 | |||
54 | 26 | def getHTML(url): | ||
55 | 27 | try: urlobj = urllib2.urlopen(url) | ||
56 | 28 | except urllib2.HTTPError: return None | ||
57 | 29 | html = urlobj.read() | ||
58 | 30 | urlobj.close() | ||
59 | 31 | return html | ||
60 | 32 | |||
61 | 33 | def unobfuscate_fm_email(email): | ||
62 | 34 | delimiters = [[' [', '] '], [' |', '| '], [' (',') '], [' __','__ '], [' __dash__ ',' __dash__ '], [' |dash| ',' |dash| '], [' [dash] ',' [dash] '], [' (dash) ',' (dash) ']] | ||
63 | 35 | symbols = {'at': '@', 'dot': '.'} | ||
64 | 36 | for symbol in symbols.keys(): | ||
65 | 37 | for delimiter in delimiters: | ||
66 | 38 | email = string.join(string.split(email, delimiter[0]+symbol+delimiter[1]), symbols[symbol]) | ||
67 | 39 | return email | ||
68 | 40 | |||
69 | 41 | class ProjectPage: | ||
70 | 42 | def __init__(self, project, repository='sf'): | ||
71 | 43 | self.project = project | ||
72 | 44 | self.repository = repository | ||
73 | 45 | self.url = makeURL(self.project, self.repository) | ||
74 | 46 | self.html = getHTML(self.url) | ||
75 | 47 | if self.html == None: raise Error, 'Could not retrieve project details - perhaps project not found on '+self.repository | ||
76 | 48 | self.theDict = {} | ||
77 | 49 | if self.repository == 'sf': | ||
78 | 50 | if string.find(self.html, 'Invalid Project') > -1: | ||
79 | 51 | raise Error, 'Project not found on '+self.repository | ||
80 | 52 | elif self.repository == 'fm': | ||
81 | 53 | if string.find(self.html, 'The project name you specified could not be found in our database') > -1: | ||
82 | 54 | raise Error, 'Project not found on '+self.repository | ||
83 | 55 | self.makeDict() | ||
84 | 56 | |||
85 | 57 | def getProjectName(self): | ||
86 | 58 | if self.repository == 'sf': | ||
87 | 59 | result = re.search('Project: .*Summary', self.html) | ||
88 | 60 | s = self.html[result.start()+9:result.end()-9] | ||
89 | 61 | return s | ||
90 | 62 | else: | ||
91 | 63 | return None | ||
92 | 64 | |||
93 | 65 | |||
94 | 66 | def getDescription(self): | ||
95 | 67 | if self.repository == 'sf': | ||
96 | 68 | start = string.find(self.html, 'Summary</A>') | ||
97 | 69 | if start == -1: return None | ||
98 | 70 | start = string.find(self.html, '<TABLE', start) | ||
99 | 71 | start = string.find(self.html, '<p>', start) | ||
100 | 72 | end = string.find(self.html, '<p>', start+1) | ||
101 | 73 | s = self.html[start+3:end] | ||
102 | 74 | s = string.strip(s) | ||
103 | 75 | s = string.join(string.split(s, '\r\n'), ' ') | ||
104 | 76 | return s | ||
105 | 77 | elif self.repository == 'fm': | ||
106 | 78 | start = string.find(self.html, '<b>About:</b>') | ||
107 | 79 | if start == -1: return None | ||
108 | 80 | start = string.find(self.html, '<br>', start) | ||
109 | 81 | end = string.find(self.html, '<p>', start) | ||
110 | 82 | s = self.html[start+4:end] | ||
111 | 83 | s = string.strip(s) | ||
112 | 84 | s = string.join(string.split(s, '\r\n'), ' ') | ||
113 | 85 | return s | ||
114 | 86 | else: | ||
115 | 87 | return None | ||
116 | 88 | |||
117 | 89 | def getHomePage(self): | ||
118 | 90 | if self.repository == 'sf': | ||
119 | 91 | result = re.search('href.*Home\ Page', self.html) | ||
120 | 92 | if result == None: return None | ||
121 | 93 | s = self.html[result.start()+6:result.end()-11] | ||
122 | 94 | return s | ||
123 | 95 | elif self.repository == 'fm': | ||
124 | 96 | start = string.find(self.html, 'Homepage:') | ||
125 | 97 | if start == -1: return None | ||
126 | 98 | start = string.find(self.html, 'http://', start) | ||
127 | 99 | end = string.find(self.html, '</a>', start) | ||
128 | 100 | return self.html[start:end] | ||
129 | 101 | else: | ||
130 | 102 | return None | ||
131 | 103 | |||
132 | 104 | def getProgramminglang(self): | ||
133 | 105 | if self.repository == 'sf': | ||
134 | 106 | result = re.search('Programming\ Language.*BR>', self.html) | ||
135 | 107 | if result == None: return None | ||
136 | 108 | langstring = self.html[result.start()+22:result.end()] | ||
137 | 109 | # Find first BR | ||
138 | 110 | end = string.find(langstring, '<BR>') | ||
139 | 111 | langstring = langstring[:end] | ||
140 | 112 | # split up, remove <A...> tags | ||
141 | 113 | langlist1 = string.split(langstring, ',') | ||
142 | 114 | langlist = [] | ||
143 | 115 | for lang in langlist1: | ||
144 | 116 | start = string.find(lang, '>') | ||
145 | 117 | lang = lang[start+1:] | ||
146 | 118 | end = string.find(lang, '<') | ||
147 | 119 | lang = lang[:end] | ||
148 | 120 | langlist.append(lang) | ||
149 | 121 | return langlist | ||
150 | 122 | elif self.repository == 'fm': | ||
151 | 123 | start = string.find(self.html, '[Programming Language]') | ||
152 | 124 | if start == -1: return None | ||
153 | 125 | start = string.find(self.html, '<td', start) | ||
154 | 126 | start = string.find(self.html, '<td', start+1) | ||
155 | 127 | end = string.find(self.html, '</td>', start) | ||
156 | 128 | langstring = self.html[start:end] | ||
157 | 129 | langlist1 = string.split(langstring, ',') | ||
158 | 130 | langlist = [] | ||
159 | 131 | for lang in langlist1: | ||
160 | 132 | start = string.find(lang, '<small>') | ||
161 | 133 | start = start + 8 | ||
162 | 134 | end = string.find(lang, '<', start) | ||
163 | 135 | lang = lang[start:end] | ||
164 | 136 | langlist.append(lang) | ||
165 | 137 | return langlist | ||
166 | 138 | else: | ||
167 | 139 | return None | ||
168 | 140 | |||
169 | 141 | def getMailinglist(self): | ||
170 | 142 | # Check for mailing list page | ||
171 | 143 | if self.repository == 'sf': | ||
172 | 144 | start = string.find(self.html, ' Mailing Lists</A>') | ||
173 | 145 | if start == -1: return None | ||
174 | 146 | start = string.rfind(self.html, '/mail/?', 0, start) | ||
175 | 147 | end = string.find(self.html, '"', start+1) | ||
176 | 148 | listURL = 'http://sourceforge.net' + self.html[start:end] | ||
177 | 149 | # fetch mailing list page | ||
178 | 150 | self.listpage = getHTML(listURL) | ||
179 | 151 | # Extract mailing list URLs | ||
180 | 152 | start = 0 | ||
181 | 153 | urls = [] | ||
182 | 154 | while start >= 0: | ||
183 | 155 | start = string.find(self.listpage, 'Subscribe/Unsubscribe/Preferences', start+1) | ||
184 | 156 | if start >= 0: | ||
185 | 157 | urlstart = string.rfind(self.listpage, 'http://lists.sourceforge', 0, start) | ||
186 | 158 | urlend = start - 2 | ||
187 | 159 | url = self.listpage[urlstart:urlend] | ||
188 | 160 | urls.append(url) | ||
189 | 161 | # Construct return list | ||
190 | 162 | if urls: return urls | ||
191 | 163 | else: return None | ||
192 | 164 | elif self.repository == 'fm': | ||
193 | 165 | # | ||
194 | 166 | # Note: for FreshMeat, this currently only works for projects that point | ||
195 | 167 | # to a sourceforge page for the mailing lists. | ||
196 | 168 | # Other projects point to an arbitrary page somewhere else that | ||
197 | 169 | # cannot be parsed without further information. | ||
198 | 170 | # | ||
199 | 171 | start = string.find(self.html, 'Mailing list archive:</b>') | ||
200 | 172 | if start == -1: return None | ||
201 | 173 | end = string.find(self.html, '</a>', start) | ||
202 | 174 | start = string.find(self.html, 'http://sourceforge.net/mail/', start, end) | ||
203 | 175 | if start == -1: return None | ||
204 | 176 | listURL = self.html[start:end] | ||
205 | 177 | # fetch mailing list page | ||
206 | 178 | self.listpage = getHTML(listURL) | ||
207 | 179 | # Extract mailing list URLs | ||
208 | 180 | start = 0 | ||
209 | 181 | urls = [] | ||
210 | 182 | while start >= 0: | ||
211 | 183 | start = string.find(self.listpage, 'Subscribe/Unsubscribe/Preferences', start+1) | ||
212 | 184 | if start >= 0: | ||
213 | 185 | urlstart = string.rfind(self.listpage, 'http://lists.sourceforge', 0, start) | ||
214 | 186 | urlend = start - 2 | ||
215 | 187 | url = self.listpage[urlstart:urlend] | ||
216 | 188 | urls.append(url) | ||
217 | 189 | # Construct return list | ||
218 | 190 | if urls: return urls | ||
219 | 191 | else: return None | ||
220 | 192 | |||
221 | 193 | else: | ||
222 | 194 | return None | ||
223 | 195 | |||
224 | 196 | def getScreenshot(self): | ||
225 | 197 | # only freshmeat has screenshots | ||
226 | 198 | if self.repository == 'sf': | ||
227 | 199 | return None | ||
228 | 200 | elif self.repository == 'fm': | ||
229 | 201 | start = string.find(self.html, '<a target="screenshot"') | ||
230 | 202 | if start == -1: return None | ||
231 | 203 | start = string.find(self.html, 'href="/screenshots/', start) | ||
232 | 204 | end = string.find(self.html, '/">', start) | ||
233 | 205 | ssurl = 'http://freshmeat.net' + self.html[start+6:end+1] | ||
234 | 206 | return ssurl | ||
235 | 207 | else: return None | ||
236 | 208 | |||
237 | 209 | def getDevels(self): | ||
238 | 210 | if self.repository == 'sf': | ||
239 | 211 | # We can get list of project admins with @sf.net emails | ||
240 | 212 | start = string.find(self.html, 'Project Admins:</SPAN>') | ||
241 | 213 | if start == -1: return None | ||
242 | 214 | end = string.find(self.html, '<SPAN CLASS="develtitle">Developers', start) | ||
243 | 215 | adminhtml = self.html[start:end] | ||
244 | 216 | admins = [] | ||
245 | 217 | adminstart = 0 | ||
246 | 218 | while adminstart >= 0: | ||
247 | 219 | adminstart = string.find(adminhtml, '<a href="/users/', adminstart + 1) | ||
248 | 220 | if adminstart >= 0: | ||
249 | 221 | adminend = string.find(adminhtml, '">', adminstart) | ||
250 | 222 | adminurl = adminhtml[adminstart+16:adminend-1] | ||
251 | 223 | admins.append(adminurl) | ||
252 | 224 | devels = {} | ||
253 | 225 | for admin in admins: | ||
254 | 226 | adminurl = 'http://sourceforge.net/users/' + admin + '/' | ||
255 | 227 | adminhtml = getHTML(adminurl) | ||
256 | 228 | namestart = string.find(adminhtml, 'Publicly Displayed Name:') + 39 | ||
257 | 229 | nameend = string.find(adminhtml, '</B>', namestart) | ||
258 | 230 | name = adminhtml[namestart:nameend] | ||
259 | 231 | email = admin + '@users.sourceforge.net' | ||
260 | 232 | devels[name] = email | ||
261 | 233 | return devels | ||
262 | 234 | elif self.repository == 'fm': | ||
263 | 235 | # We can get a single author and obfuscated email address | ||
264 | 236 | start = string.find(self.html, '<b>Author:</b>') | ||
265 | 237 | if start == -1: return None | ||
266 | 238 | start = start + 18 | ||
267 | 239 | endname = string.find(self.html, '<a href', start) | ||
268 | 240 | checkForAddrInName = string.find(self.html, '<', start, endname) | ||
269 | 241 | if checkForAddrInName >= 0: | ||
270 | 242 | endname = checkForAddrInName | ||
271 | 243 | name = string.strip(self.html[start:endname]) | ||
272 | 244 | emailstart = string.find(self.html, '<a href', start) + 16 | ||
273 | 245 | emailend = string.find(self.html, '">', emailstart) | ||
274 | 246 | email = self.html[emailstart:emailend] | ||
275 | 247 | # unobfuscate email address | ||
276 | 248 | email = unobfuscate_fm_email(email) | ||
277 | 249 | return {name: email} | ||
278 | 250 | else: return None | ||
279 | 251 | |||
280 | 252 | |||
281 | 253 | def makeDict(self): | ||
282 | 254 | self.theDict = {} | ||
283 | 255 | self.theDict['project'] = self.project | ||
284 | 256 | # | ||
285 | 257 | projectname = self.getProjectName() | ||
286 | 258 | if projectname: self.theDict['projectname'] = projectname | ||
287 | 259 | # | ||
288 | 260 | homepage = self.getHomePage() | ||
289 | 261 | if homepage: self.theDict['homepage'] = homepage | ||
290 | 262 | # | ||
291 | 263 | programminglang = self.getProgramminglang() | ||
292 | 264 | if programminglang: self.theDict['programminglang'] = programminglang | ||
293 | 265 | else: self.theDict['programminglang'] = [] | ||
294 | 266 | # | ||
295 | 267 | description = self.getDescription() | ||
296 | 268 | if description: self.theDict['description'] = description | ||
297 | 269 | # | ||
298 | 270 | mailinglist = self.getMailinglist() | ||
299 | 271 | if mailinglist: self.theDict['list'] = mailinglist | ||
300 | 272 | else: self.theDict['list'] = [] | ||
301 | 273 | # | ||
302 | 274 | screenshot = self.getScreenshot() | ||
303 | 275 | if screenshot: self.theDict['screenshot'] = screenshot | ||
304 | 276 | # | ||
305 | 277 | devels = self.getDevels() | ||
306 | 278 | if devels: self.theDict['devels'] = devels | ||
307 | 279 | else: self.theDict['devels'] = {} | ||
308 | 280 | |||
309 | 281 | def getDict(self): | ||
310 | 282 | return self.theDict | ||
311 | 283 | |||
312 | 284 | 0 | ||
313 | === removed directory 'lib/canonical/doap/ftests' | |||
314 | === removed file 'lib/canonical/doap/ftests/__init__.py' | |||
315 | === removed file 'lib/canonical/librarian/Makefile' | |||
316 | --- lib/canonical/librarian/Makefile 2010-04-22 17:30:35 +0000 | |||
317 | +++ lib/canonical/librarian/Makefile 1970-01-01 00:00:00 +0000 | |||
318 | @@ -1,18 +0,0 @@ | |||
319 | 1 | PYTHON_VERSION:=2.5 | ||
320 | 2 | TWISTD:=twistd$(PYTHON_VERSION) | ||
321 | 3 | |||
322 | 4 | PYTHONPATH=../.. | ||
323 | 5 | pythonpath=PYTHONPATH=$(PYTHONPATH) | ||
324 | 6 | |||
325 | 7 | default: | ||
326 | 8 | |||
327 | 9 | tmpdirs=/tmp/fatsam/incoming | ||
328 | 10 | |||
329 | 11 | $(tmpdirs): | ||
330 | 12 | mkdir -p $@ | ||
331 | 13 | |||
332 | 14 | run: $(tmpdirs) | ||
333 | 15 | $(pythonpath) $(TWISTD) -noy server.tac | ||
334 | 16 | |||
335 | 17 | |||
336 | 18 | .PHONY: default run | ||
337 | 19 | 0 | ||
338 | === removed directory 'lib/canonical/not-used' | |||
339 | === removed directory 'lib/canonical/not-used/hctapi' | |||
340 | === removed file 'lib/lp/archivepublisher/library.py' | |||
341 | --- lib/lp/archivepublisher/library.py 2010-02-09 00:17:40 +0000 | |||
342 | +++ lib/lp/archivepublisher/library.py 1970-01-01 00:00:00 +0000 | |||
343 | @@ -1,145 +0,0 @@ | |||
344 | 1 | # Copyright 2009 Canonical Ltd. This software is licensed under the | ||
345 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
346 | 3 | # | ||
347 | 4 | # Librarian class is the Librarian wrapper that provides local file cache | ||
348 | 5 | |||
349 | 6 | # XXX malcc 2006-08-03 bug=55031: | ||
350 | 7 | # This looks bogus; looks like it's not used, and assumptions about | ||
351 | 8 | # librarian URLs made here (and provided by the testing mocks) no longer | ||
352 | 9 | # hold for the real librarian. | ||
353 | 10 | # Can this whole file and its tests be squashed? | ||
354 | 11 | |||
355 | 12 | from canonical.librarian.client import FileDownloadClient | ||
356 | 13 | from canonical.librarian.client import FileUploadClient | ||
357 | 14 | |||
358 | 15 | import os | ||
359 | 16 | |||
360 | 17 | class Librarian (object): | ||
361 | 18 | |||
362 | 19 | def __init__(self, host, upload_port, download_port, cache): | ||
363 | 20 | self.librarian_host = host | ||
364 | 21 | self.upload_port = upload_port | ||
365 | 22 | self.download_port = download_port | ||
366 | 23 | self.cache_path = cache | ||
367 | 24 | if not os.access(cache, os.F_OK): | ||
368 | 25 | os.mkdir(cache) | ||
369 | 26 | |||
370 | 27 | |||
371 | 28 | def addFile(self, name, size, fileobj, contentType, digest=None, | ||
372 | 29 | cache=True, uploader=None): | ||
373 | 30 | """ | ||
374 | 31 | Add a file to the librarian with optional LOCAL CACHE handy | ||
375 | 32 | optimisation same parameters of original addFile and an optional | ||
376 | 33 | cache | ||
377 | 34 | |||
378 | 35 | :param cache: Optional boolean in order to allow local cache of File | ||
379 | 36 | :param uploader: Optional FileUploadClient instance (usefull for test) | ||
380 | 37 | """ | ||
381 | 38 | if not uploader: | ||
382 | 39 | uploader = FileUploadClient() | ||
383 | 40 | |||
384 | 41 | uploader.connect(self.librarian_host, self.upload_port) | ||
385 | 42 | |||
386 | 43 | fileid, filealias = uploader.addFile(name, size, fileobj, | ||
387 | 44 | contentType, digest) | ||
388 | 45 | |||
389 | 46 | if cache: | ||
390 | 47 | ## return to start of the file | ||
391 | 48 | fileobj.seek(0,0) | ||
392 | 49 | self.cacheFile(fileid, filealias, name, fileobj) | ||
393 | 50 | |||
394 | 51 | return fileid, filealias | ||
395 | 52 | |||
396 | 53 | def downloadFileToDisk(self, aliasID, archive, downloader=None): | ||
397 | 54 | """ | ||
398 | 55 | Download a file from Librarian to our LOCAL CACHE and link to | ||
399 | 56 | a given file name (major work for publishing in our archive) | ||
400 | 57 | |||
401 | 58 | :param aliasID: Librarian aliasID | ||
402 | 59 | :param filename: resulted file (/cache/<aliasID> should be linked | ||
403 | 60 | to filename) | ||
404 | 61 | :param downloader: Optional FileDownloadClient instance (useful for | ||
405 | 62 | testing process) | ||
406 | 63 | |||
407 | 64 | """ | ||
408 | 65 | if not downloader: | ||
409 | 66 | downloader = FileDownloadClient(self.librarian_host, | ||
410 | 67 | self.download_port) | ||
411 | 68 | |||
412 | 69 | path = downloader.getPathForAlias(aliasID) | ||
413 | 70 | |||
414 | 71 | # XXX: cprov 2004-11-22: | ||
415 | 72 | # The URL returned from Librarian must be correct | ||
416 | 73 | # first '/' results in garbage x !!! | ||
417 | 74 | x, fileid, filealias, name = path.split('/') | ||
418 | 75 | |||
419 | 76 | ## Verify if the file is already cached | ||
420 | 77 | if not self.isCached(path): | ||
421 | 78 | ## Grab file from Librarian | ||
422 | 79 | fp = downloader.getFileByAlias(aliasID) | ||
423 | 80 | |||
424 | 81 | ## Cache it | ||
425 | 82 | self.cacheFile(fileid, filealias, name, fp) | ||
426 | 83 | |||
427 | 84 | ##Link the cached file to the archive anyway, ensure it !! | ||
428 | 85 | path = os.path.join(self.cache_path, fileid, filealias, name) | ||
429 | 86 | self.linkFile(path, archive) | ||
430 | 87 | |||
431 | 88 | |||
432 | 89 | def cacheFile(self, fileid, filealias, name, fileobj): | ||
433 | 90 | ## efective creation of a file in fielsystem | ||
434 | 91 | # Don't spam the test runner please | ||
435 | 92 | #print 'Caching file', name | ||
436 | 93 | path = os.path.join(self.cache_path, fileid) | ||
437 | 94 | if not os.access(path, os.F_OK): | ||
438 | 95 | os.mkdir(path) | ||
439 | 96 | path = os.path.join(path, filealias) | ||
440 | 97 | if not os.access(path, os.F_OK): | ||
441 | 98 | os.mkdir(path) | ||
442 | 99 | filename = os.path.join(path, name) | ||
443 | 100 | cache = open(filename, "w") | ||
444 | 101 | content = fileobj.read() | ||
445 | 102 | cache.write(content) | ||
446 | 103 | cache.close() | ||
447 | 104 | |||
448 | 105 | |||
449 | 106 | def isCached(self, path): | ||
450 | 107 | filename = os.path.join(self.cache_path, path) | ||
451 | 108 | return os.access(filename, os.F_OK) | ||
452 | 109 | |||
453 | 110 | def linkFile(self, path, archive): | ||
454 | 111 | if os.path.exists(archive): | ||
455 | 112 | os.unlink(archive) | ||
456 | 113 | return os.link(path, archive) | ||
457 | 114 | |||
458 | 115 | if __name__ == '__main__': | ||
459 | 116 | import hashlib | ||
460 | 117 | import os | ||
461 | 118 | import sys | ||
462 | 119 | |||
463 | 120 | lib = Librarian('localhost', 9090, 8000, "/tmp/cache") | ||
464 | 121 | |||
465 | 122 | name = sys.argv[1] | ||
466 | 123 | archive = sys.argv[2] | ||
467 | 124 | |||
468 | 125 | print 'Uploading', name, 'to %s:%s' %(lib.librarian_host, | ||
469 | 126 | lib.upload_port) | ||
470 | 127 | fileobj = open(name, 'rb') | ||
471 | 128 | size = os.stat(name).st_size | ||
472 | 129 | digest = hashlib.sha1(open(name, 'rb').read()).hexdigest() | ||
473 | 130 | |||
474 | 131 | fileid, filealias = lib.addFile(name, size, fileobj, | ||
475 | 132 | contentType='test/test', | ||
476 | 133 | digest=digest) | ||
477 | 134 | |||
478 | 135 | print 'Done. File ID:', fileid | ||
479 | 136 | print 'File AliasID:', filealias | ||
480 | 137 | |||
481 | 138 | lib.downloadFileToDisk(filealias, archive) | ||
482 | 139 | |||
483 | 140 | fp = open(archive, 'r') | ||
484 | 141 | print 'First 50 bytes:' | ||
485 | 142 | print repr(fp.read(50)) | ||
486 | 143 | |||
487 | 144 | |||
488 | 145 | |||
489 | 146 | 0 | ||
490 | === removed file 'lib/lp/archivepublisher/tests/test_librarianwrapper.py' | |||
491 | --- lib/lp/archivepublisher/tests/test_librarianwrapper.py 2010-02-09 00:17:40 +0000 | |||
492 | +++ lib/lp/archivepublisher/tests/test_librarianwrapper.py 1970-01-01 00:00:00 +0000 | |||
493 | @@ -1,80 +0,0 @@ | |||
494 | 1 | # Copyright 2009 Canonical Ltd. This software is licensed under the | ||
495 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
496 | 3 | |||
497 | 4 | """Tests for librarian wrapper (lp.archivepublisher.library.py)""" | ||
498 | 5 | |||
499 | 6 | __metaclass__ = type | ||
500 | 7 | |||
501 | 8 | import hashlib | ||
502 | 9 | import os | ||
503 | 10 | import shutil | ||
504 | 11 | import sys | ||
505 | 12 | import unittest | ||
506 | 13 | |||
507 | 14 | from lp.archivepublisher.tests import datadir | ||
508 | 15 | |||
509 | 16 | from lp.archivepublisher.tests.util import ( | ||
510 | 17 | FakeDownloadClient, FakeUploadClient) | ||
511 | 18 | |||
512 | 19 | |||
513 | 20 | class TestLibrarianWrapper(unittest.TestCase): | ||
514 | 21 | |||
515 | 22 | def setUp(self): | ||
516 | 23 | ## Create archive and cache dir ... | ||
517 | 24 | os.mkdir(datadir('archive')) | ||
518 | 25 | os.mkdir(datadir('cache')) | ||
519 | 26 | |||
520 | 27 | def tearDown(self): | ||
521 | 28 | shutil.rmtree(datadir('archive')) | ||
522 | 29 | shutil.rmtree(datadir('cache')) | ||
523 | 30 | |||
524 | 31 | def testImport(self): | ||
525 | 32 | """Librarian should be importable""" | ||
526 | 33 | from lp.archivepublisher.library import Librarian | ||
527 | 34 | |||
528 | 35 | def testInstatiate(self): | ||
529 | 36 | """Librarian should be instantiatable""" | ||
530 | 37 | from lp.archivepublisher.library import Librarian | ||
531 | 38 | lib = Librarian('localhost', 9090, 8000, datadir('cache')) | ||
532 | 39 | |||
533 | 40 | def testUpload(self): | ||
534 | 41 | """Librarian Upload""" | ||
535 | 42 | name = 'ed_0.2-20.dsc' | ||
536 | 43 | path = datadir(name) | ||
537 | 44 | |||
538 | 45 | from lp.archivepublisher.library import Librarian | ||
539 | 46 | lib = Librarian('localhost', 9090, 8000, datadir('cache')) | ||
540 | 47 | |||
541 | 48 | fileobj = open(path, 'rb') | ||
542 | 49 | size = os.stat(path).st_size | ||
543 | 50 | digest = hashlib.sha1(open(path, 'rb').read()).hexdigest() | ||
544 | 51 | |||
545 | 52 | ## Use Fake Librarian class | ||
546 | 53 | uploader = FakeUploadClient() | ||
547 | 54 | |||
548 | 55 | fileid, filealias = lib.addFile(name, size, fileobj, | ||
549 | 56 | contentType='test/test', | ||
550 | 57 | digest=digest, | ||
551 | 58 | uploader=uploader) | ||
552 | 59 | #print 'ID %s ALIAS %s' %(fileid, filealias) | ||
553 | 60 | |||
554 | 61 | cached = os.path.join(datadir('cache'), name) | ||
555 | 62 | os.path.exists(cached) | ||
556 | 63 | |||
557 | 64 | def testDownload(self): | ||
558 | 65 | """Librarian DownloadToDisk process""" | ||
559 | 66 | filealias = '1' | ||
560 | 67 | archive = os.path.join (datadir('archive'), 'test') | ||
561 | 68 | |||
562 | 69 | from lp.archivepublisher.library import Librarian | ||
563 | 70 | lib = Librarian('localhost', 9090, 8000, datadir('cache')) | ||
564 | 71 | ## Use Fake Librarian Class | ||
565 | 72 | downloader = FakeDownloadClient() | ||
566 | 73 | |||
567 | 74 | lib.downloadFileToDisk(filealias, archive, downloader=downloader) | ||
568 | 75 | |||
569 | 76 | os.path.exists(archive) | ||
570 | 77 | |||
571 | 78 | |||
572 | 79 | def test_suite(): | ||
573 | 80 | return unittest.TestLoader().loadTestsFromName(__name__) | ||
574 | 81 | 0 | ||
575 | === modified file 'lib/lp/archivepublisher/tests/util.py' | |||
576 | --- lib/lp/archivepublisher/tests/util.py 2009-12-13 11:55:40 +0000 | |||
577 | +++ lib/lp/archivepublisher/tests/util.py 2010-04-28 03:27:32 +0000 | |||
578 | @@ -216,34 +216,6 @@ | |||
579 | 216 | return thing # Assume we can't copy it deeply | 216 | return thing # Assume we can't copy it deeply |
580 | 217 | 217 | ||
581 | 218 | 218 | ||
582 | 219 | class FakeDownloadClient: | ||
583 | 220 | """Fake up a FileDownloadClient for the tests""" | ||
584 | 221 | def __init__(self): | ||
585 | 222 | pass | ||
586 | 223 | |||
587 | 224 | def getFileByAlias(self, alias): | ||
588 | 225 | """Fake this up by returning data/aliases/alias""" | ||
589 | 226 | return file("%s/%s" % (datadir("aliases"), alias), "r") | ||
590 | 227 | |||
591 | 228 | def getPathForAlias(self, alias): | ||
592 | 229 | """Fake this up by returning the PATH 'alias/alias/alias'""" | ||
593 | 230 | return "/%s/%s/%s" % (alias, alias, alias) | ||
594 | 231 | |||
595 | 232 | |||
596 | 233 | class FakeUploadClient: | ||
597 | 234 | """Fake up a FileUploadClient for the tests""" | ||
598 | 235 | def __init__(self): | ||
599 | 236 | pass | ||
600 | 237 | |||
601 | 238 | def connect(self, host, port): | ||
602 | 239 | pass | ||
603 | 240 | |||
604 | 241 | def addFile(self, name, size, fileobj, contentType, digest): | ||
605 | 242 | fileid = '1' | ||
606 | 243 | filealias = '1' | ||
607 | 244 | return fileid, filealias | ||
608 | 245 | |||
609 | 246 | |||
610 | 247 | # NOTE: If you alter the configs here remember to add tests in test_config.py | 219 | # NOTE: If you alter the configs here remember to add tests in test_config.py |
611 | 248 | fake_ubuntu = FakeDistribution("ubuntu", | 220 | fake_ubuntu = FakeDistribution("ubuntu", |
612 | 249 | """ | 221 | """ |
613 | 250 | 222 | ||
614 | === removed file 'lib/psycopg.py' | |||
615 | --- lib/psycopg.py 2009-06-25 05:59:58 +0000 | |||
616 | +++ lib/psycopg.py 1970-01-01 00:00:00 +0000 | |||
617 | @@ -1,9 +0,0 @@ | |||
618 | 1 | # Copyright 2009 Canonical Ltd. This software is licensed under the | ||
619 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
620 | 3 | |||
621 | 4 | """This is not Psycopg 1.""" | ||
622 | 5 | |||
623 | 6 | class Psycopg1Imported(ImportError): | ||
624 | 7 | pass | ||
625 | 8 | |||
626 | 9 | raise Psycopg1Imported('Importing Psycopg 1.x is forbidden') |