Merge lp:~wgrant/launchpad/delete-more-stuff into lp:launchpad

Proposed by William Grant
Status: Merged
Approved by: Gavin Panella
Approved revision: no longer in the source branch.
Merged at revision: not available
Proposed branch: lp:~wgrant/launchpad/delete-more-stuff
Merge into: lp:launchpad
Diff against target: 626 lines (+0/-576)
8 files modified
lib/canonical/Makefile (+0/-5)
lib/canonical/doap/__init__.py (+0/-8)
lib/canonical/doap/forage.py (+0/-283)
lib/canonical/librarian/Makefile (+0/-18)
lib/lp/archivepublisher/library.py (+0/-145)
lib/lp/archivepublisher/tests/test_librarianwrapper.py (+0/-80)
lib/lp/archivepublisher/tests/util.py (+0/-28)
lib/psycopg.py (+0/-9)
To merge this branch: bzr merge lp:~wgrant/launchpad/delete-more-stuff
Reviewer Review Type Date Requested Status
Gavin Panella (community) Approve
Review via email: mp+24280@code.launchpad.net

Commit message

Remove more unused bits and pieces.

Description of the change

This removes a few more unused bits and pieces from around the tree:

 - Two broken Makefiles. lib/canonical/Makefile had its only statement commented out. lib/canonical/librarian/Makefile referenced long-gone TACs and obsolete directories -- it hasn't been useful or even worked for a long time.
 - lib/canonical/doap is long-obsolete, with its only remaining non-empty file being unused and unmodified since 2004.
 - lp.archivepublisher.library has been unused and broken for four or five years, with a comment at the top from four years ago suggesting that it be removed.
 - lib/psycopg.py raises an exception on import, and appears to date from the psycopg2 migration days. Nothing has imported it in a long time, so it's just clutter now (see the short illustration after this list).
 - lib/canonical/not-used is... not used. It had a single empty directory inside it.
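
For reference, the entire behaviour of the removed lib/psycopg.py shim was to fail at import time. A minimal illustration of what any importer would have seen (assuming lib/ is on sys.path, as it is in the Launchpad tree), based on the file removed in the diff below:

    >>> import psycopg
    Traceback (most recent call last):
      ...
    Psycopg1Imported: Importing Psycopg 1.x is forbidden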

Revision history for this message
Gavin Panella (allenap) :
review: Approve

Preview Diff

=== removed file 'lib/canonical/Makefile'
--- lib/canonical/Makefile 2005-10-31 18:29:12 +0000
+++ lib/canonical/Makefile 1970-01-01 00:00:00 +0000
@@ -1,5 +0,0 @@
-check:
-#	python ../../test_on_merge.py canonical.lp
-
-.PHONY: check
-
=== removed directory 'lib/canonical/doap'
=== removed file 'lib/canonical/doap/__init__.py'
--- lib/canonical/doap/__init__.py 2009-06-25 05:39:50 +0000
+++ lib/canonical/doap/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,8 +0,0 @@
-# Copyright 2009 Canonical Ltd. This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-
-"""This is the canonical.doap Python package.
-
-The DOAP (Description Of A Project) subsystem of Launchpad tracks projects,
-products, product releases and series' of releases.
-"""
=== removed file 'lib/canonical/doap/forage.py'
--- lib/canonical/doap/forage.py 2009-06-25 05:30:52 +0000
+++ lib/canonical/doap/forage.py 1970-01-01 00:00:00 +0000
@@ -1,283 +0,0 @@
-# Copyright 2009 Canonical Ltd. This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-
-# Retrieve project details from sourceforge / freshmeat.
-
-import urllib2
-import re
-import string
-
-# Constants
-Error = 'sourceforge.py error'
-
-def getProjectSpec(project, repository='sf'):
-    page = ProjectPage(project, repository)
-    #page.makeDict()
-    return page.getDict()
-
-def makeURL(project, repository='sf'):
-    if repository=='sf':
-        url = 'http://sourceforge.net/projects/'+project+'/'
-    elif repository=='fm':
-        url = 'http://freshmeat.net/projects/'+project+'/'
-    else: raise Error, 'invalid repository: '+repository
-    return url
-
-def getHTML(url):
-    try: urlobj = urllib2.urlopen(url)
-    except urllib2.HTTPError: return None
-    html = urlobj.read()
-    urlobj.close()
-    return html
-
-def unobfuscate_fm_email(email):
-    delimiters = [[' [', '] '], [' |', '| '], [' (',') '], [' __','__ '], [' __dash__ ',' __dash__ '], [' |dash| ',' |dash| '], [' [dash] ',' [dash] '], [' (dash) ',' (dash) ']]
-    symbols = {'at': '@', 'dot': '.'}
-    for symbol in symbols.keys():
-        for delimiter in delimiters:
-            email = string.join(string.split(email, delimiter[0]+symbol+delimiter[1]), symbols[symbol])
-    return email
-
-class ProjectPage:
-    def __init__(self, project, repository='sf'):
-        self.project = project
-        self.repository = repository
-        self.url = makeURL(self.project, self.repository)
-        self.html = getHTML(self.url)
-        if self.html == None: raise Error, 'Could not retrieve project details - perhaps project not found on '+self.repository
-        self.theDict = {}
-        if self.repository == 'sf':
-            if string.find(self.html, 'Invalid Project') > -1:
-                raise Error, 'Project not found on '+self.repository
-        elif self.repository == 'fm':
-            if string.find(self.html, 'The project name you specified could not be found in our database') > -1:
-                raise Error, 'Project not found on '+self.repository
-        self.makeDict()
-
-    def getProjectName(self):
-        if self.repository == 'sf':
-            result = re.search('Project: .*Summary', self.html)
-            s = self.html[result.start()+9:result.end()-9]
-            return s
-        else:
-            return None
-
-
-    def getDescription(self):
-        if self.repository == 'sf':
-            start = string.find(self.html, 'Summary</A>')
-            if start == -1: return None
-            start = string.find(self.html, '<TABLE', start)
-            start = string.find(self.html, '<p>', start)
-            end = string.find(self.html, '<p>', start+1)
-            s = self.html[start+3:end]
-            s = string.strip(s)
-            s = string.join(string.split(s, '\r\n'), ' ')
-            return s
-        elif self.repository == 'fm':
-            start = string.find(self.html, '<b>About:</b>')
-            if start == -1: return None
-            start = string.find(self.html, '<br>', start)
-            end = string.find(self.html, '<p>', start)
-            s = self.html[start+4:end]
-            s = string.strip(s)
-            s = string.join(string.split(s, '\r\n'), ' ')
-            return s
-        else:
-            return None
-
-    def getHomePage(self):
-        if self.repository == 'sf':
-            result = re.search('href.*Home\ Page', self.html)
-            if result == None: return None
-            s = self.html[result.start()+6:result.end()-11]
-            return s
-        elif self.repository == 'fm':
-            start = string.find(self.html, 'Homepage:')
-            if start == -1: return None
-            start = string.find(self.html, 'http://', start)
-            end = string.find(self.html, '</a>', start)
-            return self.html[start:end]
-        else:
-            return None
-
-    def getProgramminglang(self):
-        if self.repository == 'sf':
-            result = re.search('Programming\ Language.*BR>', self.html)
-            if result == None: return None
-            langstring = self.html[result.start()+22:result.end()]
-            # Find first BR
-            end = string.find(langstring, '<BR>')
-            langstring = langstring[:end]
-            # split up, remove <A...> tags
-            langlist1 = string.split(langstring, ',')
-            langlist = []
-            for lang in langlist1:
-                start = string.find(lang, '>')
-                lang = lang[start+1:]
-                end = string.find(lang, '<')
-                lang = lang[:end]
-                langlist.append(lang)
-            return langlist
-        elif self.repository == 'fm':
-            start = string.find(self.html, '[Programming Language]')
-            if start == -1: return None
-            start = string.find(self.html, '<td', start)
-            start = string.find(self.html, '<td', start+1)
-            end = string.find(self.html, '</td>', start)
-            langstring = self.html[start:end]
-            langlist1 = string.split(langstring, ',')
-            langlist = []
-            for lang in langlist1:
-                start = string.find(lang, '<small>')
-                start = start + 8
-                end = string.find(lang, '<', start)
-                lang = lang[start:end]
-                langlist.append(lang)
-            return langlist
-        else:
-            return None
-
-    def getMailinglist(self):
-        # Check for mailing list page
-        if self.repository == 'sf':
-            start = string.find(self.html, '&nbsp;Mailing Lists</A>')
-            if start == -1: return None
-            start = string.rfind(self.html, '/mail/?', 0, start)
-            end = string.find(self.html, '"', start+1)
-            listURL = 'http://sourceforge.net' + self.html[start:end]
-            # fetch mailing list page
-            self.listpage = getHTML(listURL)
-            # Extract mailing list URLs
-            start = 0
-            urls = []
-            while start >= 0:
-                start = string.find(self.listpage, 'Subscribe/Unsubscribe/Preferences', start+1)
-                if start >= 0:
-                    urlstart = string.rfind(self.listpage, 'http://lists.sourceforge', 0, start)
-                    urlend = start - 2
-                    url = self.listpage[urlstart:urlend]
-                    urls.append(url)
-            # Construct return list
-            if urls: return urls
-            else: return None
-        elif self.repository == 'fm':
-            #
-            # Note: for FreshMeat, this currently only works for projects that point
-            # to a sourceforge page for the mailing lists.
-            # Other projects point to an arbitrary page somewhere else that
-            # cannot be parsed without further information.
-            #
-            start = string.find(self.html, 'Mailing list archive:</b>')
-            if start == -1: return None
-            end = string.find(self.html, '</a>', start)
-            start = string.find(self.html, 'http://sourceforge.net/mail/', start, end)
-            if start == -1: return None
-            listURL = self.html[start:end]
-            # fetch mailing list page
-            self.listpage = getHTML(listURL)
-            # Extract mailing list URLs
-            start = 0
-            urls = []
-            while start >= 0:
-                start = string.find(self.listpage, 'Subscribe/Unsubscribe/Preferences', start+1)
-                if start >= 0:
-                    urlstart = string.rfind(self.listpage, 'http://lists.sourceforge', 0, start)
-                    urlend = start - 2
-                    url = self.listpage[urlstart:urlend]
-                    urls.append(url)
-            # Construct return list
-            if urls: return urls
-            else: return None
-
-        else:
-            return None
-
-    def getScreenshot(self):
-        # only freshmeat has screenshots
-        if self.repository == 'sf':
-            return None
-        elif self.repository == 'fm':
-            start = string.find(self.html, '<a target="screenshot"')
-            if start == -1: return None
-            start = string.find(self.html, 'href="/screenshots/', start)
-            end = string.find(self.html, '/">', start)
-            ssurl = 'http://freshmeat.net' + self.html[start+6:end+1]
-            return ssurl
-        else: return None
-
-    def getDevels(self):
-        if self.repository == 'sf':
-            # We can get list of project admins with @sf.net emails
-            start = string.find(self.html, 'Project Admins:</SPAN>')
-            if start == -1: return None
-            end = string.find(self.html, '<SPAN CLASS="develtitle">Developers', start)
-            adminhtml = self.html[start:end]
-            admins = []
-            adminstart = 0
-            while adminstart >= 0:
-                adminstart = string.find(adminhtml, '<a href="/users/', adminstart + 1)
-                if adminstart >= 0:
-                    adminend = string.find(adminhtml, '">', adminstart)
-                    adminurl = adminhtml[adminstart+16:adminend-1]
-                    admins.append(adminurl)
-            devels = {}
-            for admin in admins:
-                adminurl = 'http://sourceforge.net/users/' + admin + '/'
-                adminhtml = getHTML(adminurl)
-                namestart = string.find(adminhtml, 'Publicly Displayed Name:') + 39
-                nameend = string.find(adminhtml, '</B>', namestart)
-                name = adminhtml[namestart:nameend]
-                email = admin + '@users.sourceforge.net'
-                devels[name] = email
-            return devels
-        elif self.repository == 'fm':
-            # We can get a single author and obfuscated email address
-            start = string.find(self.html, '<b>Author:</b>')
-            if start == -1: return None
-            start = start + 18
-            endname = string.find(self.html, '<a href', start)
-            checkForAddrInName = string.find(self.html, '&lt;', start, endname)
-            if checkForAddrInName >= 0:
-                endname = checkForAddrInName
-            name = string.strip(self.html[start:endname])
-            emailstart = string.find(self.html, '<a href', start) + 16
-            emailend = string.find(self.html, '">', emailstart)
-            email = self.html[emailstart:emailend]
-            # unobfuscate email address
-            email = unobfuscate_fm_email(email)
-            return {name: email}
-        else: return None
-
-
-    def makeDict(self):
-        self.theDict = {}
-        self.theDict['project'] = self.project
-        #
-        projectname = self.getProjectName()
-        if projectname: self.theDict['projectname'] = projectname
-        #
-        homepage = self.getHomePage()
-        if homepage: self.theDict['homepage'] = homepage
-        #
-        programminglang = self.getProgramminglang()
-        if programminglang: self.theDict['programminglang'] = programminglang
-        else: self.theDict['programminglang'] = []
-        #
-        description = self.getDescription()
-        if description: self.theDict['description'] = description
-        #
-        mailinglist = self.getMailinglist()
-        if mailinglist: self.theDict['list'] = mailinglist
-        else: self.theDict['list'] = []
-        #
-        screenshot = self.getScreenshot()
-        if screenshot: self.theDict['screenshot'] = screenshot
-        #
-        devels = self.getDevels()
-        if devels: self.theDict['devels'] = devels
-        else: self.theDict['devels'] = {}
-
-    def getDict(self):
-        return self.theDict
-
=== removed directory 'lib/canonical/doap/ftests'
=== removed file 'lib/canonical/doap/ftests/__init__.py'
=== removed file 'lib/canonical/librarian/Makefile'
--- lib/canonical/librarian/Makefile 2010-04-22 17:30:35 +0000
+++ lib/canonical/librarian/Makefile 1970-01-01 00:00:00 +0000
@@ -1,18 +0,0 @@
-PYTHON_VERSION:=2.5
-TWISTD:=twistd$(PYTHON_VERSION)
-
-PYTHONPATH=../..
-pythonpath=PYTHONPATH=$(PYTHONPATH)
-
-default:
-
-tmpdirs=/tmp/fatsam/incoming
-
-$(tmpdirs):
-	mkdir -p $@
-
-run: $(tmpdirs)
-	$(pythonpath) $(TWISTD) -noy server.tac
-
-
-.PHONY: default run
=== removed directory 'lib/canonical/not-used'
=== removed directory 'lib/canonical/not-used/hctapi'
=== removed file 'lib/lp/archivepublisher/library.py'
--- lib/lp/archivepublisher/library.py 2010-02-09 00:17:40 +0000
+++ lib/lp/archivepublisher/library.py 1970-01-01 00:00:00 +0000
@@ -1,145 +0,0 @@
-# Copyright 2009 Canonical Ltd. This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-#
-# Librarian class is the Librarian wrapper that provides local file cache
-
-# XXX malcc 2006-08-03 bug=55031:
-# This looks bogus; looks like it's not used, and assumptions about
-# librarian URLs made here (and provided by the testing mocks) no longer
-# hold for the real librarian.
-# Can this whole file and its tests be squashed?
-
-from canonical.librarian.client import FileDownloadClient
-from canonical.librarian.client import FileUploadClient
-
-import os
-
-class Librarian (object):
-
-    def __init__(self, host, upload_port, download_port, cache):
-        self.librarian_host = host
-        self.upload_port = upload_port
-        self.download_port = download_port
-        self.cache_path = cache
-        if not os.access(cache, os.F_OK):
-            os.mkdir(cache)
-
-
-    def addFile(self, name, size, fileobj, contentType, digest=None,
-                cache=True, uploader=None):
-        """
-        Add a file to the librarian with optional LOCAL CACHE handy
-        optimisation same parameters of original addFile and an optional
-        cache
-
-        :param cache: Optional boolean in order to allow local cache of File
-        :param uploader: Optional FileUploadClient instance (usefull for test)
-        """
-        if not uploader:
-            uploader = FileUploadClient()
-
-        uploader.connect(self.librarian_host, self.upload_port)
-
-        fileid, filealias = uploader.addFile(name, size, fileobj,
-                                             contentType, digest)
-
-        if cache:
-            ## return to start of the file
-            fileobj.seek(0,0)
-            self.cacheFile(fileid, filealias, name, fileobj)
-
-        return fileid, filealias
-
-    def downloadFileToDisk(self, aliasID, archive, downloader=None):
-        """
-        Download a file from Librarian to our LOCAL CACHE and link to
-        a given file name (major work for publishing in our archive)
-
-        :param aliasID: Librarian aliasID
-        :param filename: resulted file (/cache/<aliasID> should be linked
-            to filename)
-        :param downloader: Optional FileDownloadClient instance (useful for
-            testing process)
-
-        """
-        if not downloader:
-            downloader = FileDownloadClient(self.librarian_host,
-                                            self.download_port)
-
-        path = downloader.getPathForAlias(aliasID)
-
-        # XXX: cprov 2004-11-22:
-        # The URL returned from Librarian must be correct
-        # first '/' results in garbage x !!!
-        x, fileid, filealias, name = path.split('/')
-
-        ## Verify if the file is already cached
-        if not self.isCached(path):
-            ## Grab file from Librarian
-            fp = downloader.getFileByAlias(aliasID)
-
-            ## Cache it
-            self.cacheFile(fileid, filealias, name, fp)
-
-        ##Link the cached file to the archive anyway, ensure it !!
-        path = os.path.join(self.cache_path, fileid, filealias, name)
-        self.linkFile(path, archive)
-
-
-    def cacheFile(self, fileid, filealias, name, fileobj):
-        ## efective creation of a file in fielsystem
-        # Don't spam the test runner please
-        #print 'Caching file', name
-        path = os.path.join(self.cache_path, fileid)
-        if not os.access(path, os.F_OK):
-            os.mkdir(path)
-        path = os.path.join(path, filealias)
-        if not os.access(path, os.F_OK):
-            os.mkdir(path)
-        filename = os.path.join(path, name)
-        cache = open(filename, "w")
-        content = fileobj.read()
-        cache.write(content)
-        cache.close()
-
-
-    def isCached(self, path):
-        filename = os.path.join(self.cache_path, path)
-        return os.access(filename, os.F_OK)
-
-    def linkFile(self, path, archive):
-        if os.path.exists(archive):
-            os.unlink(archive)
-        return os.link(path, archive)
-
-if __name__ == '__main__':
-    import hashlib
-    import os
-    import sys
-
-    lib = Librarian('localhost', 9090, 8000, "/tmp/cache")
-
-    name = sys.argv[1]
-    archive = sys.argv[2]
-
-    print 'Uploading', name, 'to %s:%s' %(lib.librarian_host,
-                                          lib.upload_port)
-    fileobj = open(name, 'rb')
-    size = os.stat(name).st_size
-    digest = hashlib.sha1(open(name, 'rb').read()).hexdigest()
-
-    fileid, filealias = lib.addFile(name, size, fileobj,
-                                    contentType='test/test',
-                                    digest=digest)
-
-    print 'Done. File ID:', fileid
-    print 'File AliasID:', filealias
-
-    lib.downloadFileToDisk(filealias, archive)
-
-    fp = open(archive, 'r')
-    print 'First 50 bytes:'
-    print repr(fp.read(50))
-
-
-
=== removed file 'lib/lp/archivepublisher/tests/test_librarianwrapper.py'
--- lib/lp/archivepublisher/tests/test_librarianwrapper.py 2010-02-09 00:17:40 +0000
+++ lib/lp/archivepublisher/tests/test_librarianwrapper.py 1970-01-01 00:00:00 +0000
@@ -1,80 +0,0 @@
-# Copyright 2009 Canonical Ltd. This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-
-"""Tests for librarian wrapper (lp.archivepublisher.library.py)"""
-
-__metaclass__ = type
-
-import hashlib
-import os
-import shutil
-import sys
-import unittest
-
-from lp.archivepublisher.tests import datadir
-
-from lp.archivepublisher.tests.util import (
-    FakeDownloadClient, FakeUploadClient)
-
-
-class TestLibrarianWrapper(unittest.TestCase):
-
-    def setUp(self):
-        ## Create archive and cache dir ...
-        os.mkdir(datadir('archive'))
-        os.mkdir(datadir('cache'))
-
-    def tearDown(self):
-        shutil.rmtree(datadir('archive'))
-        shutil.rmtree(datadir('cache'))
-
-    def testImport(self):
-        """Librarian should be importable"""
-        from lp.archivepublisher.library import Librarian
-
-    def testInstatiate(self):
-        """Librarian should be instantiatable"""
-        from lp.archivepublisher.library import Librarian
-        lib = Librarian('localhost', 9090, 8000, datadir('cache'))
-
-    def testUpload(self):
-        """Librarian Upload"""
-        name = 'ed_0.2-20.dsc'
-        path = datadir(name)
-
-        from lp.archivepublisher.library import Librarian
-        lib = Librarian('localhost', 9090, 8000, datadir('cache'))
-
-        fileobj = open(path, 'rb')
-        size = os.stat(path).st_size
-        digest = hashlib.sha1(open(path, 'rb').read()).hexdigest()
-
-        ## Use Fake Librarian class
-        uploader = FakeUploadClient()
-
-        fileid, filealias = lib.addFile(name, size, fileobj,
-                                        contentType='test/test',
-                                        digest=digest,
-                                        uploader=uploader)
-        #print 'ID %s ALIAS %s' %(fileid, filealias)
-
-        cached = os.path.join(datadir('cache'), name)
-        os.path.exists(cached)
-
-    def testDownload(self):
-        """Librarian DownloadToDisk process"""
-        filealias = '1'
-        archive = os.path.join (datadir('archive'), 'test')
-
-        from lp.archivepublisher.library import Librarian
-        lib = Librarian('localhost', 9090, 8000, datadir('cache'))
-        ## Use Fake Librarian Class
-        downloader = FakeDownloadClient()
-
-        lib.downloadFileToDisk(filealias, archive, downloader=downloader)
-
-        os.path.exists(archive)
-
-
-def test_suite():
-    return unittest.TestLoader().loadTestsFromName(__name__)
=== modified file 'lib/lp/archivepublisher/tests/util.py'
--- lib/lp/archivepublisher/tests/util.py 2009-12-13 11:55:40 +0000
+++ lib/lp/archivepublisher/tests/util.py 2010-04-28 03:27:32 +0000
@@ -216,34 +216,6 @@
     return thing        # Assume we can't copy it deeply
 
 
-class FakeDownloadClient:
-    """Fake up a FileDownloadClient for the tests"""
-    def __init__(self):
-        pass
-
-    def getFileByAlias(self, alias):
-        """Fake this up by returning data/aliases/alias"""
-        return file("%s/%s" % (datadir("aliases"), alias), "r")
-
-    def getPathForAlias(self, alias):
-        """Fake this up by returning the PATH 'alias/alias/alias'"""
-        return "/%s/%s/%s" % (alias, alias, alias)
-
-
-class FakeUploadClient:
-    """Fake up a FileUploadClient for the tests"""
-    def __init__(self):
-        pass
-
-    def connect(self, host, port):
-        pass
-
-    def addFile(self, name, size, fileobj, contentType, digest):
-        fileid = '1'
-        filealias = '1'
-        return fileid, filealias
-
-
 # NOTE: If you alter the configs here remember to add tests in test_config.py
 fake_ubuntu = FakeDistribution("ubuntu",
     """
=== removed file 'lib/psycopg.py'
--- lib/psycopg.py 2009-06-25 05:59:58 +0000
+++ lib/psycopg.py 1970-01-01 00:00:00 +0000
@@ -1,9 +0,0 @@
-# Copyright 2009 Canonical Ltd. This software is licensed under the
-# GNU Affero General Public License version 3 (see the file LICENSE).
-
-"""This is not Psycopg 1."""
-
-class Psycopg1Imported(ImportError):
-    pass
-
-raise Psycopg1Imported('Importing Psycopg 1.x is forbidden')