Merge lp:~wgrant/launchpad/delete-more-stuff into lp:launchpad

Proposed by William Grant
Status: Merged
Approved by: Gavin Panella
Approved revision: no longer in the source branch.
Merged at revision: not available
Proposed branch: lp:~wgrant/launchpad/delete-more-stuff
Merge into: lp:launchpad
Diff against target: 626 lines (+0/-576)
8 files modified
lib/canonical/Makefile (+0/-5)
lib/canonical/doap/__init__.py (+0/-8)
lib/canonical/doap/forage.py (+0/-283)
lib/canonical/librarian/Makefile (+0/-18)
lib/lp/archivepublisher/library.py (+0/-145)
lib/lp/archivepublisher/tests/test_librarianwrapper.py (+0/-80)
lib/lp/archivepublisher/tests/util.py (+0/-28)
lib/psycopg.py (+0/-9)
To merge this branch: bzr merge lp:~wgrant/launchpad/delete-more-stuff
Reviewer Review Type Date Requested Status
Gavin Panella (community) Approve
Review via email: mp+24280@code.launchpad.net

Commit message

Remove more unused bits and pieces.

Description of the change

This removes a few more unused bits and pieces from around the tree:

 - Two broken Makefiles. lib/canonical/Makefile had its only statement commented out. lib/canonical/librarian/Makefile referenced long-gone TACs and obsolete directories -- it hasn't worked, let alone been useful, in a long time.
 - lib/canonical/doap is long-obsolete, with its only remaining non-empty file being unused and unmodified since 2004.
 - lp.archivepublisher.library has been unused and broken for four or five years, with a comment at the top from four years ago suggesting that it be removed.
 - lib/psycopg.py raises an exception on import, and appears to date from the psycopg2 migration days. Nothing has imported it in a long time (a quick check along the lines of the sketch after this list bears that out), so it's just clutter now.
 - lib/canonical/not-used is... not used. It contained only a single empty directory.
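
As a sanity check before deleting a module such as lib/psycopg.py, one can confirm that nothing in the tree still imports it. The sketch below is illustrative only and not part of this branch: find_importers, the 'lib' root, and the 'psycopg' module name are example choices, and the regex only catches direct "import X" / "from X import ..." forms.

import os
import re

def find_importers(root, module):
    """Yield .py files under `root` that contain an import of `module`."""
    pattern = re.compile(r'^\s*(?:import|from)\s+%s\b' % re.escape(module))
    for dirpath, dirnames, filenames in os.walk(root):
        for filename in filenames:
            if not filename.endswith('.py'):
                continue
            path = os.path.join(dirpath, filename)
            with open(path) as source:
                for line in source:
                    if pattern.match(line):
                        yield path
                        break

if __name__ == '__main__':
    # 'lib' and 'psycopg' are example values; point this at whatever
    # module you are about to delete.
    for path in find_importers('lib', 'psycopg'):
        print(path)

An empty result is what the description above is claiming for lib/psycopg.py.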

Revision history for this message
Gavin Panella (allenap) :
review: Approve

Preview Diff

1=== removed file 'lib/canonical/Makefile'
2--- lib/canonical/Makefile 2005-10-31 18:29:12 +0000
3+++ lib/canonical/Makefile 1970-01-01 00:00:00 +0000
4@@ -1,5 +0,0 @@
5-check:
6-# python ../../test_on_merge.py canonical.lp
7-
8-.PHONY: check
9-
10
11=== removed directory 'lib/canonical/doap'
12=== removed file 'lib/canonical/doap/__init__.py'
13--- lib/canonical/doap/__init__.py 2009-06-25 05:39:50 +0000
14+++ lib/canonical/doap/__init__.py 1970-01-01 00:00:00 +0000
15@@ -1,8 +0,0 @@
16-# Copyright 2009 Canonical Ltd. This software is licensed under the
17-# GNU Affero General Public License version 3 (see the file LICENSE).
18-
19-"""This is the canonical.doap Python package.
20-
21-The DOAP (Description Of A Project) subsystem of Launchpad tracks projects,
22-products, product releases and series' of releases.
23-"""
24
25=== removed file 'lib/canonical/doap/forage.py'
26--- lib/canonical/doap/forage.py 2009-06-25 05:30:52 +0000
27+++ lib/canonical/doap/forage.py 1970-01-01 00:00:00 +0000
28@@ -1,283 +0,0 @@
29-# Copyright 2009 Canonical Ltd. This software is licensed under the
30-# GNU Affero General Public License version 3 (see the file LICENSE).
31-
32-# Retrieve project details from sourceforge / freshmeat.
33-
34-import urllib2
35-import re
36-import string
37-
38-# Constants
39-Error = 'sourceforge.py error'
40-
41-def getProjectSpec(project, repository='sf'):
42- page = ProjectPage(project, repository)
43- #page.makeDict()
44- return page.getDict()
45-
46-def makeURL(project, repository='sf'):
47- if repository=='sf':
48- url = 'http://sourceforge.net/projects/'+project+'/'
49- elif repository=='fm':
50- url = 'http://freshmeat.net/projects/'+project+'/'
51- else: raise Error, 'invalid repository: '+repository
52- return url
53-
54-def getHTML(url):
55- try: urlobj = urllib2.urlopen(url)
56- except urllib2.HTTPError: return None
57- html = urlobj.read()
58- urlobj.close()
59- return html
60-
61-def unobfuscate_fm_email(email):
62- delimiters = [[' [', '] '], [' |', '| '], [' (',') '], [' __','__ '], [' __dash__ ',' __dash__ '], [' |dash| ',' |dash| '], [' [dash] ',' [dash] '], [' (dash) ',' (dash) ']]
63- symbols = {'at': '@', 'dot': '.'}
64- for symbol in symbols.keys():
65- for delimiter in delimiters:
66- email = string.join(string.split(email, delimiter[0]+symbol+delimiter[1]), symbols[symbol])
67- return email
68-
69-class ProjectPage:
70- def __init__(self, project, repository='sf'):
71- self.project = project
72- self.repository = repository
73- self.url = makeURL(self.project, self.repository)
74- self.html = getHTML(self.url)
75- if self.html == None: raise Error, 'Could not retrieve project details - perhaps project not found on '+self.repository
76- self.theDict = {}
77- if self.repository == 'sf':
78- if string.find(self.html, 'Invalid Project') > -1:
79- raise Error, 'Project not found on '+self.repository
80- elif self.repository == 'fm':
81- if string.find(self.html, 'The project name you specified could not be found in our database') > -1:
82- raise Error, 'Project not found on '+self.repository
83- self.makeDict()
84-
85- def getProjectName(self):
86- if self.repository == 'sf':
87- result = re.search('Project: .*Summary', self.html)
88- s = self.html[result.start()+9:result.end()-9]
89- return s
90- else:
91- return None
92-
93-
94- def getDescription(self):
95- if self.repository == 'sf':
96- start = string.find(self.html, 'Summary</A>')
97- if start == -1: return None
98- start = string.find(self.html, '<TABLE', start)
99- start = string.find(self.html, '<p>', start)
100- end = string.find(self.html, '<p>', start+1)
101- s = self.html[start+3:end]
102- s = string.strip(s)
103- s = string.join(string.split(s, '\r\n'), ' ')
104- return s
105- elif self.repository == 'fm':
106- start = string.find(self.html, '<b>About:</b>')
107- if start == -1: return None
108- start = string.find(self.html, '<br>', start)
109- end = string.find(self.html, '<p>', start)
110- s = self.html[start+4:end]
111- s = string.strip(s)
112- s = string.join(string.split(s, '\r\n'), ' ')
113- return s
114- else:
115- return None
116-
117- def getHomePage(self):
118- if self.repository == 'sf':
119- result = re.search('href.*Home\ Page', self.html)
120- if result == None: return None
121- s = self.html[result.start()+6:result.end()-11]
122- return s
123- elif self.repository == 'fm':
124- start = string.find(self.html, 'Homepage:')
125- if start == -1: return None
126- start = string.find(self.html, 'http://', start)
127- end = string.find(self.html, '</a>', start)
128- return self.html[start:end]
129- else:
130- return None
131-
132- def getProgramminglang(self):
133- if self.repository == 'sf':
134- result = re.search('Programming\ Language.*BR>', self.html)
135- if result == None: return None
136- langstring = self.html[result.start()+22:result.end()]
137- # Find first BR
138- end = string.find(langstring, '<BR>')
139- langstring = langstring[:end]
140- # split up, remove <A...> tags
141- langlist1 = string.split(langstring, ',')
142- langlist = []
143- for lang in langlist1:
144- start = string.find(lang, '>')
145- lang = lang[start+1:]
146- end = string.find(lang, '<')
147- lang = lang[:end]
148- langlist.append(lang)
149- return langlist
150- elif self.repository == 'fm':
151- start = string.find(self.html, '[Programming Language]')
152- if start == -1: return None
153- start = string.find(self.html, '<td', start)
154- start = string.find(self.html, '<td', start+1)
155- end = string.find(self.html, '</td>', start)
156- langstring = self.html[start:end]
157- langlist1 = string.split(langstring, ',')
158- langlist = []
159- for lang in langlist1:
160- start = string.find(lang, '<small>')
161- start = start + 8
162- end = string.find(lang, '<', start)
163- lang = lang[start:end]
164- langlist.append(lang)
165- return langlist
166- else:
167- return None
168-
169- def getMailinglist(self):
170- # Check for mailing list page
171- if self.repository == 'sf':
172- start = string.find(self.html, '&nbsp;Mailing Lists</A>')
173- if start == -1: return None
174- start = string.rfind(self.html, '/mail/?', 0, start)
175- end = string.find(self.html, '"', start+1)
176- listURL = 'http://sourceforge.net' + self.html[start:end]
177- # fetch mailing list page
178- self.listpage = getHTML(listURL)
179- # Extract mailing list URLs
180- start = 0
181- urls = []
182- while start >= 0:
183- start = string.find(self.listpage, 'Subscribe/Unsubscribe/Preferences', start+1)
184- if start >= 0:
185- urlstart = string.rfind(self.listpage, 'http://lists.sourceforge', 0, start)
186- urlend = start - 2
187- url = self.listpage[urlstart:urlend]
188- urls.append(url)
189- # Construct return list
190- if urls: return urls
191- else: return None
192- elif self.repository == 'fm':
193- #
194- # Note: for FreshMeat, this currently only works for projects that point
195- # to a sourceforge page for the mailing lists.
196- # Other projects point to an arbitrary page somewhere else that
197- # cannot be parsed without further information.
198- #
199- start = string.find(self.html, 'Mailing list archive:</b>')
200- if start == -1: return None
201- end = string.find(self.html, '</a>', start)
202- start = string.find(self.html, 'http://sourceforge.net/mail/', start, end)
203- if start == -1: return None
204- listURL = self.html[start:end]
205- # fetch mailing list page
206- self.listpage = getHTML(listURL)
207- # Extract mailing list URLs
208- start = 0
209- urls = []
210- while start >= 0:
211- start = string.find(self.listpage, 'Subscribe/Unsubscribe/Preferences', start+1)
212- if start >= 0:
213- urlstart = string.rfind(self.listpage, 'http://lists.sourceforge', 0, start)
214- urlend = start - 2
215- url = self.listpage[urlstart:urlend]
216- urls.append(url)
217- # Construct return list
218- if urls: return urls
219- else: return None
220-
221- else:
222- return None
223-
224- def getScreenshot(self):
225- # only freshmeat has screenshots
226- if self.repository == 'sf':
227- return None
228- elif self.repository == 'fm':
229- start = string.find(self.html, '<a target="screenshot"')
230- if start == -1: return None
231- start = string.find(self.html, 'href="/screenshots/', start)
232- end = string.find(self.html, '/">', start)
233- ssurl = 'http://freshmeat.net' + self.html[start+6:end+1]
234- return ssurl
235- else: return None
236-
237- def getDevels(self):
238- if self.repository == 'sf':
239- # We can get list of project admins with @sf.net emails
240- start = string.find(self.html, 'Project Admins:</SPAN>')
241- if start == -1: return None
242- end = string.find(self.html, '<SPAN CLASS="develtitle">Developers', start)
243- adminhtml = self.html[start:end]
244- admins = []
245- adminstart = 0
246- while adminstart >= 0:
247- adminstart = string.find(adminhtml, '<a href="/users/', adminstart + 1)
248- if adminstart >= 0:
249- adminend = string.find(adminhtml, '">', adminstart)
250- adminurl = adminhtml[adminstart+16:adminend-1]
251- admins.append(adminurl)
252- devels = {}
253- for admin in admins:
254- adminurl = 'http://sourceforge.net/users/' + admin + '/'
255- adminhtml = getHTML(adminurl)
256- namestart = string.find(adminhtml, 'Publicly Displayed Name:') + 39
257- nameend = string.find(adminhtml, '</B>', namestart)
258- name = adminhtml[namestart:nameend]
259- email = admin + '@users.sourceforge.net'
260- devels[name] = email
261- return devels
262- elif self.repository == 'fm':
263- # We can get a single author and obfuscated email address
264- start = string.find(self.html, '<b>Author:</b>')
265- if start == -1: return None
266- start = start + 18
267- endname = string.find(self.html, '<a href', start)
268- checkForAddrInName = string.find(self.html, '&lt;', start, endname)
269- if checkForAddrInName >= 0:
270- endname = checkForAddrInName
271- name = string.strip(self.html[start:endname])
272- emailstart = string.find(self.html, '<a href', start) + 16
273- emailend = string.find(self.html, '">', emailstart)
274- email = self.html[emailstart:emailend]
275- # unobfuscate email address
276- email = unobfuscate_fm_email(email)
277- return {name: email}
278- else: return None
279-
280-
281- def makeDict(self):
282- self.theDict = {}
283- self.theDict['project'] = self.project
284- #
285- projectname = self.getProjectName()
286- if projectname: self.theDict['projectname'] = projectname
287- #
288- homepage = self.getHomePage()
289- if homepage: self.theDict['homepage'] = homepage
290- #
291- programminglang = self.getProgramminglang()
292- if programminglang: self.theDict['programminglang'] = programminglang
293- else: self.theDict['programminglang'] = []
294- #
295- description = self.getDescription()
296- if description: self.theDict['description'] = description
297- #
298- mailinglist = self.getMailinglist()
299- if mailinglist: self.theDict['list'] = mailinglist
300- else: self.theDict['list'] = []
301- #
302- screenshot = self.getScreenshot()
303- if screenshot: self.theDict['screenshot'] = screenshot
304- #
305- devels = self.getDevels()
306- if devels: self.theDict['devels'] = devels
307- else: self.theDict['devels'] = {}
308-
309- def getDict(self):
310- return self.theDict
311-
312
313=== removed directory 'lib/canonical/doap/ftests'
314=== removed file 'lib/canonical/doap/ftests/__init__.py'
315=== removed file 'lib/canonical/librarian/Makefile'
316--- lib/canonical/librarian/Makefile 2010-04-22 17:30:35 +0000
317+++ lib/canonical/librarian/Makefile 1970-01-01 00:00:00 +0000
318@@ -1,18 +0,0 @@
319-PYTHON_VERSION:=2.5
320-TWISTD:=twistd$(PYTHON_VERSION)
321-
322-PYTHONPATH=../..
323-pythonpath=PYTHONPATH=$(PYTHONPATH)
324-
325-default:
326-
327-tmpdirs=/tmp/fatsam/incoming
328-
329-$(tmpdirs):
330- mkdir -p $@
331-
332-run: $(tmpdirs)
333- $(pythonpath) $(TWISTD) -noy server.tac
334-
335-
336-.PHONY: default run
337
338=== removed directory 'lib/canonical/not-used'
339=== removed directory 'lib/canonical/not-used/hctapi'
340=== removed file 'lib/lp/archivepublisher/library.py'
341--- lib/lp/archivepublisher/library.py 2010-02-09 00:17:40 +0000
342+++ lib/lp/archivepublisher/library.py 1970-01-01 00:00:00 +0000
343@@ -1,145 +0,0 @@
344-# Copyright 2009 Canonical Ltd. This software is licensed under the
345-# GNU Affero General Public License version 3 (see the file LICENSE).
346-#
347-# Librarian class is the Librarian wrapper that provides local file cache
348-
349-# XXX malcc 2006-08-03 bug=55031:
350-# This looks bogus; looks like it's not used, and assumptions about
351-# librarian URLs made here (and provided by the testing mocks) no longer
352-# hold for the real librarian.
353-# Can this whole file and its tests be squashed?
354-
355-from canonical.librarian.client import FileDownloadClient
356-from canonical.librarian.client import FileUploadClient
357-
358-import os
359-
360-class Librarian (object):
361-
362- def __init__(self, host, upload_port, download_port, cache):
363- self.librarian_host = host
364- self.upload_port = upload_port
365- self.download_port = download_port
366- self.cache_path = cache
367- if not os.access(cache, os.F_OK):
368- os.mkdir(cache)
369-
370-
371- def addFile(self, name, size, fileobj, contentType, digest=None,
372- cache=True, uploader=None):
373- """
374- Add a file to the librarian with optional LOCAL CACHE handy
375- optimisation same parameters of original addFile and an optional
376- cache
377-
378- :param cache: Optional boolean in order to allow local cache of File
379- :param uploader: Optional FileUploadClient instance (usefull for test)
380- """
381- if not uploader:
382- uploader = FileUploadClient()
383-
384- uploader.connect(self.librarian_host, self.upload_port)
385-
386- fileid, filealias = uploader.addFile(name, size, fileobj,
387- contentType, digest)
388-
389- if cache:
390- ## return to start of the file
391- fileobj.seek(0,0)
392- self.cacheFile(fileid, filealias, name, fileobj)
393-
394- return fileid, filealias
395-
396- def downloadFileToDisk(self, aliasID, archive, downloader=None):
397- """
398- Download a file from Librarian to our LOCAL CACHE and link to
399- a given file name (major work for publishing in our archive)
400-
401- :param aliasID: Librarian aliasID
402- :param filename: resulted file (/cache/<aliasID> should be linked
403- to filename)
404- :param downloader: Optional FileDownloadClient instance (useful for
405- testing process)
406-
407- """
408- if not downloader:
409- downloader = FileDownloadClient(self.librarian_host,
410- self.download_port)
411-
412- path = downloader.getPathForAlias(aliasID)
413-
414- # XXX: cprov 2004-11-22:
415- # The URL returned from Librarian must be correct
416- # first '/' results in garbage x !!!
417- x, fileid, filealias, name = path.split('/')
418-
419- ## Verify if the file is already cached
420- if not self.isCached(path):
421- ## Grab file from Librarian
422- fp = downloader.getFileByAlias(aliasID)
423-
424- ## Cache it
425- self.cacheFile(fileid, filealias, name, fp)
426-
427- ##Link the cached file to the archive anyway, ensure it !!
428- path = os.path.join(self.cache_path, fileid, filealias, name)
429- self.linkFile(path, archive)
430-
431-
432- def cacheFile(self, fileid, filealias, name, fileobj):
433- ## efective creation of a file in fielsystem
434- # Don't spam the test runner please
435- #print 'Caching file', name
436- path = os.path.join(self.cache_path, fileid)
437- if not os.access(path, os.F_OK):
438- os.mkdir(path)
439- path = os.path.join(path, filealias)
440- if not os.access(path, os.F_OK):
441- os.mkdir(path)
442- filename = os.path.join(path, name)
443- cache = open(filename, "w")
444- content = fileobj.read()
445- cache.write(content)
446- cache.close()
447-
448-
449- def isCached(self, path):
450- filename = os.path.join(self.cache_path, path)
451- return os.access(filename, os.F_OK)
452-
453- def linkFile(self, path, archive):
454- if os.path.exists(archive):
455- os.unlink(archive)
456- return os.link(path, archive)
457-
458-if __name__ == '__main__':
459- import hashlib
460- import os
461- import sys
462-
463- lib = Librarian('localhost', 9090, 8000, "/tmp/cache")
464-
465- name = sys.argv[1]
466- archive = sys.argv[2]
467-
468- print 'Uploading', name, 'to %s:%s' %(lib.librarian_host,
469- lib.upload_port)
470- fileobj = open(name, 'rb')
471- size = os.stat(name).st_size
472- digest = hashlib.sha1(open(name, 'rb').read()).hexdigest()
473-
474- fileid, filealias = lib.addFile(name, size, fileobj,
475- contentType='test/test',
476- digest=digest)
477-
478- print 'Done. File ID:', fileid
479- print 'File AliasID:', filealias
480-
481- lib.downloadFileToDisk(filealias, archive)
482-
483- fp = open(archive, 'r')
484- print 'First 50 bytes:'
485- print repr(fp.read(50))
486-
487-
488-
489
490=== removed file 'lib/lp/archivepublisher/tests/test_librarianwrapper.py'
491--- lib/lp/archivepublisher/tests/test_librarianwrapper.py 2010-02-09 00:17:40 +0000
492+++ lib/lp/archivepublisher/tests/test_librarianwrapper.py 1970-01-01 00:00:00 +0000
493@@ -1,80 +0,0 @@
494-# Copyright 2009 Canonical Ltd. This software is licensed under the
495-# GNU Affero General Public License version 3 (see the file LICENSE).
496-
497-"""Tests for librarian wrapper (lp.archivepublisher.library.py)"""
498-
499-__metaclass__ = type
500-
501-import hashlib
502-import os
503-import shutil
504-import sys
505-import unittest
506-
507-from lp.archivepublisher.tests import datadir
508-
509-from lp.archivepublisher.tests.util import (
510- FakeDownloadClient, FakeUploadClient)
511-
512-
513-class TestLibrarianWrapper(unittest.TestCase):
514-
515- def setUp(self):
516- ## Create archive and cache dir ...
517- os.mkdir(datadir('archive'))
518- os.mkdir(datadir('cache'))
519-
520- def tearDown(self):
521- shutil.rmtree(datadir('archive'))
522- shutil.rmtree(datadir('cache'))
523-
524- def testImport(self):
525- """Librarian should be importable"""
526- from lp.archivepublisher.library import Librarian
527-
528- def testInstatiate(self):
529- """Librarian should be instantiatable"""
530- from lp.archivepublisher.library import Librarian
531- lib = Librarian('localhost', 9090, 8000, datadir('cache'))
532-
533- def testUpload(self):
534- """Librarian Upload"""
535- name = 'ed_0.2-20.dsc'
536- path = datadir(name)
537-
538- from lp.archivepublisher.library import Librarian
539- lib = Librarian('localhost', 9090, 8000, datadir('cache'))
540-
541- fileobj = open(path, 'rb')
542- size = os.stat(path).st_size
543- digest = hashlib.sha1(open(path, 'rb').read()).hexdigest()
544-
545- ## Use Fake Librarian class
546- uploader = FakeUploadClient()
547-
548- fileid, filealias = lib.addFile(name, size, fileobj,
549- contentType='test/test',
550- digest=digest,
551- uploader=uploader)
552- #print 'ID %s ALIAS %s' %(fileid, filealias)
553-
554- cached = os.path.join(datadir('cache'), name)
555- os.path.exists(cached)
556-
557- def testDownload(self):
558- """Librarian DownloadToDisk process"""
559- filealias = '1'
560- archive = os.path.join (datadir('archive'), 'test')
561-
562- from lp.archivepublisher.library import Librarian
563- lib = Librarian('localhost', 9090, 8000, datadir('cache'))
564- ## Use Fake Librarian Class
565- downloader = FakeDownloadClient()
566-
567- lib.downloadFileToDisk(filealias, archive, downloader=downloader)
568-
569- os.path.exists(archive)
570-
571-
572-def test_suite():
573- return unittest.TestLoader().loadTestsFromName(__name__)
574
575=== modified file 'lib/lp/archivepublisher/tests/util.py'
576--- lib/lp/archivepublisher/tests/util.py 2009-12-13 11:55:40 +0000
577+++ lib/lp/archivepublisher/tests/util.py 2010-04-28 03:27:32 +0000
578@@ -216,34 +216,6 @@
579 return thing # Assume we can't copy it deeply
580
581
582-class FakeDownloadClient:
583- """Fake up a FileDownloadClient for the tests"""
584- def __init__(self):
585- pass
586-
587- def getFileByAlias(self, alias):
588- """Fake this up by returning data/aliases/alias"""
589- return file("%s/%s" % (datadir("aliases"), alias), "r")
590-
591- def getPathForAlias(self, alias):
592- """Fake this up by returning the PATH 'alias/alias/alias'"""
593- return "/%s/%s/%s" % (alias, alias, alias)
594-
595-
596-class FakeUploadClient:
597- """Fake up a FileUploadClient for the tests"""
598- def __init__(self):
599- pass
600-
601- def connect(self, host, port):
602- pass
603-
604- def addFile(self, name, size, fileobj, contentType, digest):
605- fileid = '1'
606- filealias = '1'
607- return fileid, filealias
608-
609-
610 # NOTE: If you alter the configs here remember to add tests in test_config.py
611 fake_ubuntu = FakeDistribution("ubuntu",
612 """
613
614=== removed file 'lib/psycopg.py'
615--- lib/psycopg.py 2009-06-25 05:59:58 +0000
616+++ lib/psycopg.py 1970-01-01 00:00:00 +0000
617@@ -1,9 +0,0 @@
618-# Copyright 2009 Canonical Ltd. This software is licensed under the
619-# GNU Affero General Public License version 3 (see the file LICENSE).
620-
621-"""This is not Psycopg 1."""
622-
623-class Psycopg1Imported(ImportError):
624- pass
625-
626-raise Psycopg1Imported('Importing Psycopg 1.x is forbidden')