| author | Richard Purdie <rpurdie@linux.intel.com> | 2010-08-27 15:14:24 +0100 |
|---|---|---|
| committer | Richard Purdie <rpurdie@linux.intel.com> | 2010-08-27 15:29:45 +0100 |
| commit | 29d6678fd546377459ef75cf54abeef5b969b5cf (patch) | |
| tree | 8edd65790e37a00d01c3f203f773fe4b5012db18 /meta/packages/python/python-urlgrabber | |
| parent | da49de6885ee1bc424e70bc02f21f6ab920efb55 (diff) | |
Major layout change to the packages directory
Having one monolithic packages directory makes it hard to find things
and is generally overwhelming. This commit splits it into several
logical sections, roughly based on function; recipes.txt gives more
information about the classifications used.
The opportunity is also taken to switch from "packages" to "recipes",
the term used in OpenEmbedded, since "packages" can be confusing to
people and carries several different meanings.
Not all recipes have been classified yet; this is just a first pass
at separating things out. Some packages are moved to meta-extras as
they're no longer actively used or maintained.
Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
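For orientation, the function-based split looks roughly like the sketch below. The directory names are those used by oe-core after this reorganisation; the per-directory comments are illustrative summaries only, and recipes.txt remains the authoritative classification. Python recipes such as python-urlgrabber, whose patch files are removed below, land under recipes-devtools/python/.

```text
meta/
├── recipes-core/          # core pieces needed to build a basic image
├── recipes-devtools/      # development and build tools (python, gcc, ...)
├── recipes-connectivity/  # networking and connectivity
├── recipes-extended/      # extra full-featured userland
├── recipes-graphics/      # X11 and the graphics stack
├── recipes-kernel/        # the kernel and kernel-related recipes
├── recipes-multimedia/    # multimedia libraries and codecs
└── ...                    # further categories; see recipes.txt
```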
Diffstat (limited to 'meta/packages/python/python-urlgrabber')

| File | Changes |
|---|---|
| meta/packages/python/python-urlgrabber/urlgrabber-3.0.0-cleanup.patch | 28 deletions |
| meta/packages/python/python-urlgrabber/urlgrabber-HEAD.patch | 142 deletions |
| meta/packages/python/python-urlgrabber/urlgrabber-reset.patch | 15 deletions |

3 files changed, 0 insertions, 185 deletions
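Patch files like the three deleted below do nothing on their own: a recipe pulls them in through its SRC_URI, and BitBake applies them on top of the unpacked upstream source at do_patch time. A hypothetical sketch of how a python-urlgrabber recipe would reference them follows; the actual .bb file is outside this diffstat, so treat every line here as illustrative.

```bitbake
# Hypothetical recipe excerpt, illustrative only. Patches shipped next to
# the recipe are fetched via file:// and applied in the order listed.
SRC_URI = "http://urlgrabber.baseurl.org/download/urlgrabber-${PV}.tar.gz \
           file://urlgrabber-3.0.0-cleanup.patch \
           file://urlgrabber-HEAD.patch \
           file://urlgrabber-reset.patch \
          "
```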
diff --git a/meta/packages/python/python-urlgrabber/urlgrabber-3.0.0-cleanup.patch b/meta/packages/python/python-urlgrabber/urlgrabber-3.0.0-cleanup.patch
deleted file mode 100644
index 7a1ee059d1..0000000000
--- a/meta/packages/python/python-urlgrabber/urlgrabber-3.0.0-cleanup.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-diff -up urlgrabber-3.0.0/urlgrabber/grabber.py.cleanup urlgrabber-3.0.0/urlgrabber/grabber.py
---- urlgrabber-3.0.0/urlgrabber/grabber.py.cleanup	2007-11-29 10:25:13.000000000 +0000
-+++ urlgrabber-3.0.0/urlgrabber/grabber.py	2007-11-29 10:26:15.000000000 +0000
-@@ -1204,16 +1204,18 @@ class URLGrabberFileObject:
-         bs = 1024*8
-         size = 0
- 
--        if amount is not None: bs = min(bs, amount - size)
--        block = self.read(bs)
--        size = size + len(block)
--        while block:
--            new_fo.write(block)
-+        try:
-             if amount is not None: bs = min(bs, amount - size)
-             block = self.read(bs)
-             size = size + len(block)
-+            while block:
-+                new_fo.write(block)
-+                if amount is not None: bs = min(bs, amount - size)
-+                block = self.read(bs)
-+                size = size + len(block)
-+        finally:
-+            new_fo.close()
- 
--        new_fo.close()
-         try:
-             modified_tuple = self.hdr.getdate_tz('last-modified')
-             modified_stamp = rfc822.mktime_tz(modified_tuple)
diff --git a/meta/packages/python/python-urlgrabber/urlgrabber-HEAD.patch b/meta/packages/python/python-urlgrabber/urlgrabber-HEAD.patch
deleted file mode 100644
index 90180d29a0..0000000000
--- a/meta/packages/python/python-urlgrabber/urlgrabber-HEAD.patch
+++ /dev/null
@@ -1,142 +0,0 @@
-diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py
-index e090e90..a26880c 100644
---- a/urlgrabber/grabber.py
-+++ b/urlgrabber/grabber.py
-@@ -439,6 +439,12 @@ try:
- except:
-     __version__ = '???'
- 
-+try:
-+    # this part isn't going to do much - need to talk to gettext
-+    from i18n import _
-+except ImportError, msg:
-+    def _(st): return st
-+
- ########################################################################
- # functions for debugging output. These functions are here because they
- # are also part of the module initialization.
-@@ -1052,7 +1058,8 @@ class PyCurlFileObject():
-         self._reget_length = 0
-         self._prog_running = False
-         self._error = (None, None)
--        self.size = None
-+        self.size = 0
-+        self._hdr_ended = False
-         self._do_open()
- 
- 
-@@ -1085,9 +1092,14 @@ class PyCurlFileObject():
-             return -1
- 
-     def _hdr_retrieve(self, buf):
-+        if self._hdr_ended:
-+            self._hdr_dump = ''
-+            self.size = 0
-+            self._hdr_ended = False
-+
-         if self._over_max_size(cur=len(self._hdr_dump),
-                                max_size=self.opts.max_header_size):
--            return -1
-+            return -1
-         try:
-             self._hdr_dump += buf
-             # we have to get the size before we do the progress obj start
-@@ -1104,7 +1116,17 @@ class PyCurlFileObject():
-                     s = parse150(buf)
-                 if s:
-                     self.size = int(s)
--
-+
-+            if buf.lower().find('location') != -1:
-+                location = ':'.join(buf.split(':')[1:])
-+                location = location.strip()
-+                self.scheme = urlparse.urlsplit(location)[0]
-+                self.url = location
-+
-+            if len(self._hdr_dump) != 0 and buf == '\r\n':
-+                self._hdr_ended = True
-+                if DEBUG: DEBUG.info('header ended:')
-+
-             return len(buf)
-         except KeyboardInterrupt:
-             return pycurl.READFUNC_ABORT
-@@ -1136,6 +1158,7 @@ class PyCurlFileObject():
-         self.curl_obj.setopt(pycurl.PROGRESSFUNCTION, self._progress_update)
-         self.curl_obj.setopt(pycurl.FAILONERROR, True)
-         self.curl_obj.setopt(pycurl.OPT_FILETIME, True)
-+        self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True)
- 
-         if DEBUG:
-             self.curl_obj.setopt(pycurl.VERBOSE, True)
-@@ -1291,7 +1314,12 @@ class PyCurlFileObject():
-                 raise err
- 
-             elif str(e.args[1]) == '' and self.http_code != 0: # fake it until you make it
--                msg = 'HTTP Error %s : %s ' % (self.http_code, self.url)
-+                if self.scheme in ['http', 'https']:
-+                    msg = 'HTTP Error %s : %s ' % (self.http_code, self.url)
-+                elif self.scheme in ['ftp']:
-+                    msg = 'FTP Error %s : %s ' % (self.http_code, self.url)
-+                else:
-+                    msg = "Unknown Error: URL=%s , scheme=%s" % (self.url, self.scheme)
-             else:
-                 msg = 'PYCURL ERROR %s - "%s"' % (errcode, str(e.args[1]))
-                 code = errcode
-@@ -1299,6 +1327,12 @@ class PyCurlFileObject():
-             err.code = code
-             err.exception = e
-             raise err
-+        else:
-+            if self._error[1]:
-+                msg = self._error[1]
-+                err = URLGRabError(14, msg)
-+                err.url = self.url
-+                raise err
- 
-     def _do_open(self):
-         self.curl_obj = _curl_cache
-@@ -1532,11 +1566,14 @@ class PyCurlFileObject():
-     def _over_max_size(self, cur, max_size=None):
- 
-         if not max_size:
--            max_size = self.size
--            if self.opts.size: # if we set an opts size use that, no matter what
--                max_size = self.opts.size
-+            if not self.opts.size:
-+                max_size = self.size
-+            else:
-+                max_size = self.opts.size
-+
-         if not max_size: return False # if we have None for all of the Max then this is dumb
--        if cur > max_size + max_size*.10:
-+
-+        if cur > int(float(max_size) * 1.10):
- 
-             msg = _("Downloaded more than max size for %s: %s > %s") \
-                   % (self.url, cur, max_size)
-@@ -1582,7 +1619,11 @@ class PyCurlFileObject():
-             self.opts.progress_obj.end(self._amount_read)
-         self.fo.close()
- 
--
-+    def geturl(self):
-+        """ Provide the geturl() method, used to be got from
-+            urllib.addinfourl, via. urllib.URLopener.* """
-+        return self.url
-+
- _curl_cache = pycurl.Curl() # make one and reuse it over and over and over
- 
- 
-diff --git a/urlgrabber/progress.py b/urlgrabber/progress.py
-index dd07c6a..45eb248 100644
---- a/urlgrabber/progress.py
-+++ b/urlgrabber/progress.py
-@@ -658,6 +658,8 @@ def format_time(seconds, use_hours=0):
-     if seconds is None or seconds < 0:
-         if use_hours: return '--:--:--'
-         else: return '--:--'
-+    elif seconds == float('inf'):
-+        return 'Infinite'
-     else:
-         seconds = int(seconds)
-         minutes = seconds / 60
diff --git a/meta/packages/python/python-urlgrabber/urlgrabber-reset.patch b/meta/packages/python/python-urlgrabber/urlgrabber-reset.patch
deleted file mode 100644
index b63e7c33ac..0000000000
--- a/meta/packages/python/python-urlgrabber/urlgrabber-reset.patch
+++ /dev/null
@@ -1,15 +0,0 @@
---- a/urlgrabber/grabber.py	2010-02-19 14:50:45.000000000 -0500
-+++ b/urlgrabber/grabber.py	2010-02-19 14:51:28.000000000 -0500
-@@ -1626,6 +1626,12 @@
- 
- _curl_cache = pycurl.Curl() # make one and reuse it over and over and over
- 
-+def reset_curl_obj():
-+    """To make sure curl has reread the network/dns info we force a reload"""
-+    global _curl_cache
-+    _curl_cache.close()
-+    _curl_cache = pycurl.Curl()
-+
- 
- #####################################################################
- # DEPRECATED FUNCTIONS
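Taken together, the three deleted patches address resource cleanup, redirect handling, and cached-handle staleness in urlgrabber. The core idea of the first patch, guaranteeing that the destination file is closed even when a read or write fails mid-copy, generalises beyond urlgrabber. Below is a minimal standalone sketch of the same try/finally pattern; the copy_stream helper and its signature are illustrative, not urlgrabber API.

```python
def copy_stream(src, dest_path, amount=None, bs=8 * 1024):
    """Copy up to `amount` bytes from file-like `src` into `dest_path`.

    Mirrors the fix in urlgrabber-3.0.0-cleanup.patch: the destination is
    closed in a finally: block, so an exception raised by read()/write()
    mid-copy cannot leak the file descriptor.
    """
    new_fo = open(dest_path, "wb")
    size = 0
    try:
        while True:
            if amount is not None:
                bs = min(bs, amount - size)  # never read past the requested amount
            block = src.read(bs)
            if not block:  # EOF, or amount reached (bs == 0)
                break
            new_fo.write(block)
            size += len(block)
    finally:
        new_fo.close()  # runs on success and on error alike
    return size
```

The other two patches follow the same defensive spirit: urlgrabber-HEAD.patch enables pycurl.FOLLOWLOCATION and parses the Location header so that self.url (and the new geturl() method) reflect the post-redirect URL, and urlgrabber-reset.patch gives callers a way to discard the shared pycurl.Curl() handle so that changed network/DNS configuration is actually reread instead of being served from the cached handle.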