
python-urlgrabber package update

Signed-off-by: basebuilder_pel7ppc64bebuilder0 <basebuilder@powerel.org>
commit 84a17b227e (branch: master) by basebuilder_pel7ppc64bebuilder0
7 changed files with 796 additions:
  1. SOURCES/BZ-1017491-respond-to-ctrl-c.patch (+60)
  2. SOURCES/BZ-1082648-curl-77-error-message.patch (+18)
  3. SOURCES/BZ-1099101-revert-curl-ctrl-c.patch (+19)
  4. SOURCES/BZ-1233329-timedhosts-parsing-error-handling.patch (+24)
  5. SOURCES/BZ-1342179-add-retry-no-cache-opt.patch (+279)
  6. SOURCES/BZ-853432-single-conn-reset.patch (+31)
  7. SPECS/python-urlgrabber.spec (+365)

SOURCES/BZ-1017491-respond-to-ctrl-c.patch (+60)

@@ -0,0 +1,60 @@
commit 42b5945c99d92d55b5d401f2d934fe8fcc19f98b
Author: Zdenek Pavlas <zpavlas@redhat.com>
Date: Mon Nov 4 15:32:43 2013 +0100

Tell curl to return immediately on ctrl-c. BZ 1017491

diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py
index 03fa1ba..579c2ff 100644
--- a/urlgrabber/grabber.py
+++ b/urlgrabber/grabber.py
@@ -1903,6 +1903,8 @@ class PyCurlFileObject(object):
             urllib.addinfourl, via. urllib.URLopener.* """
         return self.url
 
+# tell curl to return immediately on ctrl-c
+pycurl.global_init(pycurl.GLOBAL_DEFAULT | pycurl.GLOBAL_ACK_EINTR)
 _curl_cache = pycurl.Curl() # make one and reuse it over and over and over
 
 def reset_curl_obj():
commit 857af5ac1cec0a06755b75286ae2192e398cd341
Author: Zdenek Pavlas <zpavlas@redhat.com>
Date: Wed Dec 18 13:12:00 2013 +0100

Don't fail with older pycurl.

diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py
index 579c2ff..ef18d6a 100644
--- a/urlgrabber/grabber.py
+++ b/urlgrabber/grabber.py
@@ -1903,8 +1903,9 @@ class PyCurlFileObject(object):
             urllib.addinfourl, via. urllib.URLopener.* """
         return self.url
 
-# tell curl to return immediately on ctrl-c
-pycurl.global_init(pycurl.GLOBAL_DEFAULT | pycurl.GLOBAL_ACK_EINTR)
+if hasattr(pycurl, 'GLOBAL_ACK_EINTR'):
+    # fail immediately on ctrl-c
+    pycurl.global_init(pycurl.GLOBAL_DEFAULT | pycurl.GLOBAL_ACK_EINTR)
 _curl_cache = pycurl.Curl() # make one and reuse it over and over and over
 
 def reset_curl_obj():
commit bf0a0be71373dec515bbb54e0613a3b9b0c00b04
Author: Valentina Mukhamedzhanova <vmukhame@redhat.com>
Date: Tue Feb 4 10:11:58 2014 +0100

Treat curl errcodes 42, 55, 56 as Ctrl-C BZ#1017491

diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py
index 04f1179..f8deeb8 100644
--- a/urlgrabber/grabber.py
+++ b/urlgrabber/grabber.py
@@ -1517,7 +1517,7 @@ class PyCurlFileObject(object):
                 err.url = errurl
                 raise err
 
-            elif errcode == 42:
+            elif errcode in (42, 55, 56):
                 # this is probably wrong but ultimately this is what happens
                 # we have a legit http code and a pycurl 'writer failed' code
                 # which almost always means something aborted it from outside

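Taken together, the first two commits in this patch reduce to a short module-level guard. A minimal standalone sketch (hasattr guard as in the final patch; safe on pycurl builds that predate GLOBAL_ACK_EINTR):

    import pycurl

    # GLOBAL_ACK_EINTR asks libcurl to return from blocking calls as soon as
    # a signal (e.g. SIGINT from Ctrl-C) interrupts them, instead of quietly
    # restarting the interrupted operation.  Older pycurl lacks the constant.
    if hasattr(pycurl, 'GLOBAL_ACK_EINTR'):
        pycurl.global_init(pycurl.GLOBAL_DEFAULT | pycurl.GLOBAL_ACK_EINTR)
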
SOURCES/BZ-1082648-curl-77-error-message.patch (+18)

@@ -0,0 +1,18 @@
commit e19463a8781fa5de74e62a09684259309b2f89cf
Author: Valentina Mukhamedzhanova <vmukhame@redhat.com>
Date: Mon Sep 22 14:45:10 2014 +0200

Add a comprehensive error message to pycurl error 77. BZ 1082648

diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py
index fd31123..961d18a 100644
--- a/urlgrabber/grabber.py
+++ b/urlgrabber/grabber.py
@@ -1576,6 +1576,7 @@ class PyCurlFileObject(object):
                       67 : _("Authentication failure"),
                       70 : _("Out of disk space on server"),
                       73 : _("Remove file exists"),
+                      77 : _("Problem with the SSL CA cert (path? access rights?)"),
                       }
         errstr = str(e.args[1]) or pyerr2str.get(errcode, '<Unknown>')
         if code and not 200 <= code <= 299:

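The hunk extends an errcode-to-message table that is consulted when pycurl itself supplies no message. A minimal sketch of that lookup pattern (table abridged to the entries visible above; curl_errstr is an illustrative name, not urlgrabber API):

    # prefer the message carried in the pycurl.error args, then the static
    # table, then a generic marker
    pyerr2str = {
        67: "Authentication failure",
        70: "Out of disk space on server",
        73: "Remove file exists",
        77: "Problem with the SSL CA cert (path? access rights?)",
    }

    def curl_errstr(errcode, errmsg=''):
        return errmsg or pyerr2str.get(errcode, '<Unknown>')

    print(curl_errstr(77))  # Problem with the SSL CA cert (path? access rights?)
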
SOURCES/BZ-1099101-revert-curl-ctrl-c.patch (+19)

@@ -0,0 +1,19 @@
commit 545eb9bbc325a2f59803dfb199ef64dec6db8774
Author: Valentina Mukhamedzhanova <vmukhame@redhat.com>
Date: Wed Sep 10 10:04:10 2014 +0200

Revert bf0a0be71373dec515bbb54e0613a3b9b0c00b04 - curl error codes.

diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py
index 69cd113..fd31123 100644
--- a/urlgrabber/grabber.py
+++ b/urlgrabber/grabber.py
@@ -1524,7 +1524,7 @@ class PyCurlFileObject(object):
                 err.url = errurl
                 raise err
 
-            elif errcode in (42, 55, 56):
+            elif errcode == 42:
                 # this is probably wrong but ultimately this is what happens
                 # we have a legit http code and a pycurl 'writer failed' code
                 # which almost always means something aborted it from outside

SOURCES/BZ-1233329-timedhosts-parsing-error-handling.patch (+24)

@@ -0,0 +1,24 @@
commit 7644d090e186c44d577f8d06de57bd815e0dc8b0
Author: Valentina Mukhamedzhanova <vmukhame@redhat.com>
Date: Tue Jun 23 10:00:12 2015 +0200

Don't crash on timedhosts parsing error. BZ#1233329

diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py
index 91cdbdd..f09d4a5 100644
--- a/urlgrabber/grabber.py
+++ b/urlgrabber/grabber.py
@@ -2377,8 +2377,11 @@ class _TH:
         try:
             now = int(time.time())
             for line in open(filename):
-                host, speed, fail, ts = line.rsplit(' ', 3)
-                _TH.hosts[host] = int(speed), int(fail), min(int(ts), now)
+                try:
+                    host, speed, fail, ts = line.rsplit(' ', 3)
+                    _TH.hosts[host] = int(speed), int(fail), min(int(ts), now)
+                except ValueError:
+                    if DEBUG: DEBUG.info('Error parsing timedhosts: line "%s"', line)
         except IOError: pass
         _TH.dirty = False

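The fix gives each timedhosts line its own try/except so one malformed entry no longer aborts loading the whole file. As a standalone sketch (line format "host speed fail timestamp" taken from the patch; load_timedhosts is an illustrative name):

    import time

    def load_timedhosts(filename):
        hosts = {}
        now = int(time.time())
        try:
            for line in open(filename):
                try:
                    host, speed, fail, ts = line.rsplit(' ', 3)
                    # clamp timestamps from the future to "now"
                    hosts[host] = int(speed), int(fail), min(int(ts), now)
                except ValueError:
                    pass  # skip malformed lines instead of crashing
        except IOError:
            pass
        return hosts
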
SOURCES/BZ-1342179-add-retry-no-cache-opt.patch (+279)

@@ -0,0 +1,279 @@
diff -up urlgrabber-3.10/test/test_mirror.py.orig urlgrabber-3.10/test/test_mirror.py
--- urlgrabber-3.10/test/test_mirror.py.orig 2013-08-26 09:09:07.000000000 +0200
+++ urlgrabber-3.10/test/test_mirror.py 2016-06-29 18:26:06.790393129 +0200
@@ -268,33 +268,55 @@ class ActionTests(TestCase):
         self.assertEquals(self.g.calls, expected_calls)
         self.assertEquals(urlgrabber.mirror.DEBUG.logs, expected_logs)
 
+import thread, socket
+LOCALPORT = 'localhost', 2000
+
 class HttpReplyCode(TestCase):
     def setUp(self):
+        # start the server
+        self.exit = False
         def server():
-            import socket
             s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
             s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-            s.bind(('localhost', 2000)); s.listen(1)
+            s.bind(LOCALPORT); s.listen(1)
             while 1:
                 c, a = s.accept()
+                if self.exit: c.close(); break
                 while not c.recv(4096).endswith('\r\n\r\n'): pass
                 c.sendall('HTTP/1.1 %d %s\r\n' % self.reply)
+                if self.content is not None:
+                    c.sendall('Content-Length: %d\r\n\r\n' % len(self.content))
+                    c.sendall(self.content)
                 c.close()
-        import thread
-        self.reply = 503, "Busy"
+            s.close()
+            self.exit = False
         thread.start_new_thread(server, ())
 
+        # create grabber and mirror group objects
         def failure(obj):
             self.code = getattr(obj.exception, 'code', None)
             return {}
         self.g = URLGrabber()
-        self.mg = MirrorGroup(self.g, ['http://localhost:2000/'], failure_callback = failure)
+        self.mg = MirrorGroup(self.g, ['http://%s:%d' % LOCALPORT],
+                              failure_callback = failure)
+
+    def tearDown(self):
+        # shut down the server
+        self.exit = True
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.connect(LOCALPORT); s.close() # wake it up
+        while self.exit: pass # poor man's join
 
     def test_grab(self):
+        'tests the propagation of HTTP reply code'
+        self.reply = 503, "Busy"
+        self.content = None
+
+        # single
         self.assertRaises(URLGrabError, self.mg.urlgrab, 'foo')
         self.assertEquals(self.code, 503); del self.code
 
+        # multi
         err = []
         self.mg.urlgrab('foo', async = True, failfunc = err.append)
         urlgrabber.grabber.parallel_wait()
diff -up urlgrabber-3.10/test/test_mirror.py.orig urlgrabber-3.10/test/test_mirror.py
--- urlgrabber-3.10/test/test_mirror.py.orig 2016-06-29 18:26:06.790393129 +0200
+++ urlgrabber-3.10/test/test_mirror.py 2016-06-29 18:26:58.886148544 +0200
@@ -268,13 +268,14 @@ class ActionTests(TestCase):
         self.assertEquals(self.g.calls, expected_calls)
         self.assertEquals(urlgrabber.mirror.DEBUG.logs, expected_logs)
 
-import thread, socket
+import threading, socket
 LOCALPORT = 'localhost', 2000
 
 class HttpReplyCode(TestCase):
     def setUp(self):
         # start the server
         self.exit = False
+        self.process = lambda data: None
         def server():
             s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
             s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
@@ -282,7 +283,10 @@ class HttpReplyCode(TestCase):
             while 1:
                 c, a = s.accept()
                 if self.exit: c.close(); break
-                while not c.recv(4096).endswith('\r\n\r\n'): pass
+                data = ''
+                while not data.endswith('\r\n\r\n'):
+                    data = c.recv(4096)
+                self.process(data)
                 c.sendall('HTTP/1.1 %d %s\r\n' % self.reply)
                 if self.content is not None:
                     c.sendall('Content-Length: %d\r\n\r\n' % len(self.content))
@@ -290,7 +294,8 @@ class HttpReplyCode(TestCase):
                 c.close()
             s.close()
             self.exit = False
-        thread.start_new_thread(server, ())
+        self.thread = threading.Thread(target=server)
+        self.thread.start()
 
         # create grabber and mirror group objects
         def failure(obj):
@@ -305,7 +310,7 @@ class HttpReplyCode(TestCase):
         self.exit = True
         s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         s.connect(LOCALPORT); s.close() # wake it up
-        while self.exit: pass # poor man's join
+        self.thread.join()
 
     def test_grab(self):
         'tests the propagation of HTTP reply code'
@@ -323,6 +328,45 @@ class HttpReplyCode(TestCase):
         self.assertEquals([e.exception.errno for e in err], [256])
         self.assertEquals(self.code, 503); del self.code
 
+    def test_retry_no_cache(self):
+        'test bypassing proxy cache on failure'
+        def process(data):
+            if 'Pragma:no-cache' in data:
+                self.content = 'version2'
+            else:
+                self.content = 'version1'
+
+        def checkfunc_read(obj):
+            if obj.data == 'version1':
+                raise URLGrabError(-1, 'Outdated version of foo')
+
+        def checkfunc_grab(obj):
+            with open('foo') as f:
+                if f.read() == 'version1':
+                    raise URLGrabError(-1, 'Outdated version of foo')
+
+        self.process = process
+        self.reply = 200, "OK"
+
+        opts = self.g.opts
+        opts.retry = 3
+        opts.retry_no_cache = True
+
+        # single
+        opts.checkfunc = checkfunc_read
+        try:
+            self.mg.urlread('foo')
+        except URLGrabError as e:
+            self.fail(str(e))
+
+        # multi
+        opts.checkfunc = checkfunc_grab
+        self.mg.urlgrab('foo', async=True)
+        try:
+            urlgrabber.grabber.parallel_wait()
+        except URLGrabError as e:
+            self.fail(str(e))
+
 def suite():
     tl = TestLoader()
     return tl.loadTestsFromModule(sys.modules[__name__])
diff -up urlgrabber-3.10/urlgrabber/grabber.py.orig urlgrabber-3.10/urlgrabber/grabber.py
--- urlgrabber-3.10/urlgrabber/grabber.py.orig 2016-06-29 18:25:53.964453346 +0200
+++ urlgrabber-3.10/urlgrabber/grabber.py 2016-06-29 18:26:58.886148544 +0200
@@ -171,6 +171,12 @@ GENERAL ARGUMENTS (kwargs)
     The libproxy code is only used if the proxies dictionary
     does not provide any proxies.
 
+  no_cache = False
+
+    When True, server-side cache will be disabled for http and https
+    requests.  This is equivalent to setting
+      http_headers = (('Pragma', 'no-cache'),)
+
   prefix = None
 
     a url prefix that will be prepended to all requested urls.  For
@@ -383,10 +389,11 @@ RETRY RELATED ARGUMENTS
     identical to checkfunc, except for the attributes defined in the
     CallbackObject instance.  The attributes for failure_callback are:
 
-      exception = the raised exception
-      url       = the url we're trying to fetch
-      tries     = the number of tries so far (including this one)
-      retry     = the value of the retry option
+      exception      = the raised exception
+      url            = the url we're trying to fetch
+      tries          = the number of tries so far (including this one)
+      retry          = the value of the retry option
+      retry_no_cache = the value of the retry_no_cache option
 
     The callback is present primarily to inform the calling program of
     the failure, but if it raises an exception (including the one it's
@@ -431,6 +438,19 @@ RETRY RELATED ARGUMENTS
     passed the same arguments, so you could use the same function for
     both.
 
+  retry_no_cache = False
+
+    When True, automatically enable no_cache for future retries if
+    checkfunc performs an unsuccessful check.
+
+    This option is useful if your application expects a set of files
+    from the same server to form an atomic unit and you write your
+    checkfunc to ensure each file being downloaded belongs to such a
+    unit.  If transparent proxy caching is in effect, the files can
+    become out-of-sync, disrupting the atomicity.  Enabling this option
+    will prevent that, while ensuring that you still enjoy the benefits
+    of caching when possible.
+
 BANDWIDTH THROTTLING
 
   urlgrabber supports throttling via two values: throttle and
@@ -1001,6 +1021,8 @@ class URLGrabberOptions:
         self.half_life = 30*24*60*60 # 30 days
         self.default_speed = 1e6 # 1 MBit
         self.ftp_disable_epsv = False
+        self.no_cache = False
+        self.retry_no_cache = False
 
     def __repr__(self):
         return self.format()
@@ -1077,7 +1099,8 @@ class URLGrabber(object):
             if callback:
                 if DEBUG: DEBUG.info('calling callback: %s', callback)
                 obj = CallbackObject(exception=exception, url=args[0],
-                                     tries=tries, retry=opts.retry)
+                                     tries=tries, retry=opts.retry,
+                                     retry_no_cache=opts.retry_no_cache)
                 _run_callback(callback, obj)
 
             if (opts.retry is None) or (tries == opts.retry):
@@ -1089,6 +1112,8 @@ class URLGrabber(object):
                 if DEBUG: DEBUG.info('retrycode (%i) not in list %s, re-raising',
                                      retrycode, opts.retrycodes)
                 raise
+            if retrycode is not None and retrycode < 0 and opts.retry_no_cache:
+                opts.no_cache = True
 
     def urlopen(self, url, opts=None, **kwargs):
         """open the url and return a file object
@@ -1429,11 +1454,15 @@ class PyCurlFileObject(object):
             self.curl_obj.setopt(pycurl.SSLKEYPASSWD, opts.ssl_key_pass)
 
         #headers:
-        if opts.http_headers and self.scheme in ('http', 'https'):
+        if self.scheme in ('http', 'https'):
             headers = []
-            for (tag, content) in opts.http_headers:
-                headers.append('%s:%s' % (tag, content))
-            self.curl_obj.setopt(pycurl.HTTPHEADER, headers)
+            if opts.http_headers is not None:
+                for (tag, content) in opts.http_headers:
+                    headers.append('%s:%s' % (tag, content))
+            if opts.no_cache:
+                headers.append('Pragma:no-cache')
+            if headers:
+                self.curl_obj.setopt(pycurl.HTTPHEADER, headers)
 
         # ranges:
         if opts.range or opts.reget:
@@ -2055,7 +2084,8 @@ class _ExternalDownloader:
         'ssl_key_pass',
         'ssl_verify_peer', 'ssl_verify_host',
         'size', 'max_header_size', 'ip_resolve',
-        'ftp_disable_epsv'
+        'ftp_disable_epsv',
+        'no_cache',
     )
 
     def start(self, opts):
@@ -2236,6 +2266,8 @@ def parallel_wait(meter=None):
             except URLGrabError, ug_err:
                 retry = 0 # no retries
             if opts.tries < retry and ug_err.errno in opts.retrycodes:
+                if ug_err.errno < 0 and opts.retry_no_cache:
+                    opts.no_cache = True
                 start(opts, opts.tries + 1) # simple retry
                 continue

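From the caller's side the new options plug into the existing retry machinery: a checkfunc failure with a negative error code triggers a retry, and with retry_no_cache=True that retry adds the Pragma:no-cache header. A hypothetical usage sketch (URL and version strings are illustrative; option names come from the patch):

    from urlgrabber.grabber import URLGrabber, URLGrabError

    def checkfunc(obj):
        # reject a stale copy; a negative errno makes the failure retryable
        if obj.data == 'version1':
            raise URLGrabError(-1, 'Outdated version of foo')

    g = URLGrabber(retry=3, retry_no_cache=True, checkfunc=checkfunc)
    # the first attempt may be served by a transparent proxy cache; after a
    # failed check, retries ask the origin server to bypass the cache
    data = g.urlread('http://example.com/foo')
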
SOURCES/BZ-853432-single-conn-reset.patch (+31)

@@ -0,0 +1,31 @@
From 93ad446fc6533304e33b231cd5786572773440b9 Mon Sep 17 00:00:00 2001
From: Zdenek Pavlas <zpavlas@redhat.com>
Date: Mon, 21 Oct 2013 16:52:53 +0200
Subject: [PATCH] single-conn mode: Kill the cached idle connection. BZ 853432

The multi-downloader manages connections as necessary, caching
at most one downloader per host. But there's one more connection,
used by the legacy downloader. In case keepalive=1 (default),
it's still connected, possibly to the same connection-limited
host. Kill it.
---
urlgrabber/grabber.py | 3 +++
1 file changed, 3 insertions(+)

diff --git a/urlgrabber/grabber.py b/urlgrabber/grabber.py
index b004f4d..d06cdae 100644
--- a/urlgrabber/grabber.py
+++ b/urlgrabber/grabber.py
@@ -2221,6 +2221,9 @@ def parallel_wait(meter=None):
                 # Turn on the max_connections=1 override. BZ 853432
                 if DEBUG: DEBUG.info('max_connections(%s) %s => 1', key, limit)
                 single.add(key)
+                # When using multi-downloader the parent's _curl_cache
+                # object is idle.  Kill it, as it might use keepalive=1.
+                reset_curl_obj()
 
             retry = opts.retry or 0
             if opts.failure_callback:
--
1.7.11.7

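For reference, the reset_curl_obj() helper the patch calls looks roughly like this in grabber.py (a sketch from surrounding context, not part of this diff):

    import pycurl

    _curl_cache = pycurl.Curl()  # make one and reuse it over and over and over

    def reset_curl_obj():
        # closing the cached handle drops its keepalive connection, freeing
        # the slot on a connection-limited host for the multi-downloader
        global _curl_cache
        _curl_cache.close()
        _curl_cache = pycurl.Curl()
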
SPECS/python-urlgrabber.spec (+365)

@@ -0,0 +1,365 @@
%{!?python_sitelib: %define python_sitelib %(python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}

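The %{python_sitelib} fallback on the first line shells out to Python and captures stdout as the site-packages path; in effect it runs the following (parenthesized here so it also works outside the Python 2 macro):

    # what the %{python_sitelib} fallback computes (Python 2 era distutils)
    from distutils.sysconfig import get_python_lib
    print(get_python_lib())  # e.g. /usr/lib/python2.7/site-packages
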
Summary: A high-level cross-protocol url-grabber
Name: python-urlgrabber
Version: 3.10
Release: 8%{?dist}
Source0: http://urlgrabber.baseurl.org/download/urlgrabber-%{version}.tar.gz
Patch1: BZ-853432-single-conn-reset.patch
Patch2: BZ-1017491-respond-to-ctrl-c.patch

# rhel-7.1.0
Patch10: BZ-1099101-revert-curl-ctrl-c.patch
Patch11: BZ-1082648-curl-77-error-message.patch

# rhel-7.2
Patch20: BZ-1233329-timedhosts-parsing-error-handling.patch

# rhel-7.3
Patch25: BZ-1342179-add-retry-no-cache-opt.patch

License: LGPLv2+
Group: Development/Libraries
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
BuildArch: noarch
BuildRequires: python-devel, python-pycurl
Url: http://urlgrabber.baseurl.org/
Provides: urlgrabber = %{version}-%{release}
Requires: python-pycurl

%description
A high-level cross-protocol url-grabber for python supporting HTTP, FTP
and file locations. Features include keepalive, byte ranges, throttling,
authentication, proxies and more.

%prep
%setup -q -n urlgrabber-%{version}
%patch1 -p1
%patch2 -p1

# rhel-7.1.0
%patch10 -p1
%patch11 -p1

# rhel-7.2
%patch20 -p1

# rhel-7.3
%patch25 -p1

%build
python setup.py build

%install
rm -rf $RPM_BUILD_ROOT
python setup.py install -O1 --root=$RPM_BUILD_ROOT
rm -rf $RPM_BUILD_ROOT/%{_docdir}/urlgrabber-%{version}

%clean
rm -rf $RPM_BUILD_ROOT

%files
%defattr(-,root,root,-)
%doc ChangeLog LICENSE README TODO
%{python_sitelib}/urlgrabber*
%{_bindir}/urlgrabber
%attr(0755,root,root) %{_libexecdir}/urlgrabber-ext-down

%changelog
* Thu Jun 30 2016 Valentina Mukhamedzhanova <vmukhame@redhat.com> - 3.10-8
- Add no_cache and retry_no_cache options.
- Resolves: bug#1342179

* Tue Jun 30 2015 Valentina Mukhamedzhanova <vmukhame@redhat.com> - 3.10-7
- Don't crash on timedhosts parsing error.
- Resolves: bug#1233329

* Wed Sep 24 2014 Valentina Mukhamedzhanova <vmukhame@redhat.com> - 3.10-6
- Add a comprehensive error message to curl error 77.
- Resolves: bug#1082648

* Wed Sep 10 2014 Valentina Mukhamedzhanova <vmukhame@redhat.com> - 3.10-5
- Revert treating curl errcodes 55 and 56 as Ctrl-C.
- Resolves: bug#1099101

* Tue Feb 4 2014 Valentina Mukhamedzhanova <vmukhame@redhat.com> - 3.10-4
- Treat curl errcodes 55 and 56 as Ctrl-C.
- Resolves: bug#1017491

* Thu Jan 16 2014 Valentina Mukhamedzhanova <vmukhame@redhat.com> - 3.10-3
- Tell curl to return immediately on ctrl-c.
- Resolves: bug#1017491

* Fri Dec 27 2013 Daniel Mach <dmach@redhat.com> - 3.10-2
- Mass rebuild 2013-12-27

* Mon Oct 21 2013 Zdenek Pavlas <zpavlas@redhat.com> - 3.10-1
- single-conn mode: Kill the cached idle connection. BZ 853432

* Thu Oct 17 2013 Zdenek Pavlas <zpavlas@redhat.com> - 3.10-0
- clamp timestamps from the future. BZ 894630, 1013733
- Fix the condition to enter single-connection mode. BZ 853432
- Fix unit tests

* Tue Oct 8 2013 Zdenek Pavlas <zpavlas@redhat.com> - 3.9.1-33
- Fix the condition to enter single-connection mode (again)

* Thu Oct 3 2013 Zdenek Pavlas <zpavlas@redhat.com> - 3.9.1-32
- Update to latest HEAD.
- Fix traceback when entering single-connection mode with DEBUG off
- Fix traceback when limit==None and DEBUG is on
- Fix the condition to enter single-connection mode

* Thu Oct 3 2013 Zdenek Pavlas <zpavlas@redhat.com> - 3.9.1-31
- Update to latest HEAD.
- Switch to max_connections=1 after refused connect. BZ 853432
- Never display negative downloading speed. BZ 1001767
- clamp timestamps from the future. BZ 894630, 1013733

* Thu Aug 29 2013 Zdenek Pavlas <zpavlas@redhat.com> - 3.9.1-30
- Update to latest HEAD.
- add ftp_disable_epsv option. BZ 849177
- Spelling fixes.
- docs: throttling is per-connection, suggest max_connections=1. BZ 998263
- More robust "Content-Length" parsing. BZ 1000841

* Tue Jun 18 2013 Zdenek Pavlas <zpavlas@redhat.com> - 3.9.1-29
- Update to latest HEAD.
- Fix parsing of FTP 213 responses
- Switch to max_connections=1 after timing out. BZ 853432
- max_connections=0 should imply the default limit.

* Fri May 17 2013 Zdenek Pavlas <zpavlas@redhat.com> - 3.9.1-28
- Update to latest HEAD.
- Add the "minrate" option. BZ 964298
- Workaround progress "!!!" end for file:// repos.
- add URLGrabError.code to the external downloader API
- Disable GSSNEGOTIATE to work around a curl bug. BZ 960163

* Wed Mar 27 2013 Zdenek Pavlas <zpavlas@redhat.com> - 3.9.1-26
- Update to latest HEAD.
- Handle HTTP 200 response to range requests correctly. BZ 919076
- Reset curl_obj to clear CURLOPT_RANGE from previous requests. BZ 923951

* Thu Mar 7 2013 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-25
- Update to latest HEAD.
- fix some test cases that were failing. BZ 918658
- exit(1) on /bin/urlgrabber failures. BZ 918613
- clamp timestamps from the future. BZ 894630
- enable GSSNEGOTIATE if implemented correctly.
- make error messages more verbose.

* Thu Feb 14 2013 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.1-24
- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild

* Mon Jan 7 2013 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-23
- Update to latest HEAD.
- Handle checkfunc unicode exceptions. BZ 672117

* Thu Dec 6 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-22
- Update to latest HEAD.
- Improve URLGRABBER_DEBUG, add max_connections. BZ 853432

* Thu Nov 1 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-21
- Update to latest HEAD.
- Get rid of "HTTP 200 OK" errors. BZ 871835.

* Tue Sep 4 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-20
- Update to latest HEAD.
- Fixed BZ 851178, 854075.

* Mon Aug 27 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-19
- timedhosts: defer 1st update until a 1MB+ download. BZ 851178

* Wed Aug 22 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-18
- Update to latest HEAD, lots of enhancements.

* Wed Aug 10 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-17
- Fix a bug in progress display code. BZ 847105.

* Wed Aug 8 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-16
- Update to latest head.
- Improved multi-file progress, small bugfixes.

* Fri Jul 20 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-15
- Update to latest head, misc bugfixes: BZ 832028, 831904, 831291.
- Disable Kerberos auth. BZ 769254
- copy_local bugfix. BZ 837018
- send 'tries' counter to mirror failure callback

* Mon May 21 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-14
- timedhosts: sanity check on dl_time

* Fri May 18 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-13
- fix file:// profiling. BZ 822632.

* Mon May 14 2012 Zdeněk Pavlas <zpavlas@redhat.com> - 3.9.1-12
- Update to latest HEAD
- Merge multi-downloader patches

* Sat Jan 14 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.1-11
- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild

* Wed Feb 09 2011 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.9.1-10
- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild

* Fri Sep 3 2010 Seth Vidal <skvidal at fedoraproject.org> - 3.9.1-9
- new update to latest head with a number of patches collected from
older bug reports.

* Mon Aug 30 2010 Seth Vidal <skvidal at fedoraproject.org> - 3.9.1-8
- update to latest head patches

* Thu Jul 22 2010 David Malcolm <dmalcolm@redhat.com> - 3.9.1-7
- Rebuilt for https://fedoraproject.org/wiki/Features/Python_2.7/MassRebuild

* Tue Apr 13 2010 James Antill <james@fedoraproject.org> 3.9.1-6
- Update to upstream HEAD.
- LOWSPEEDLIMIT and hdrs

* Fri Feb 19 2010 Seth Vidal <skvidal at fedoraproject.org> - 3.9.1-5
- add patch to allow reset_curl_obj() to close and reload the cached curl obj

* Thu Nov 12 2009 Seth Vidal <skvidal at fedoraproject.org> - 3.9.1-4
- reset header values when we redirect and make sure debug output will work

* Wed Nov 11 2009 Seth Vidal <skvidal at fedoraproject.org> - 3.9.1-3
- fixing a bunch of redirect and max size bugs

* Fri Sep 25 2009 Seth Vidal <skvidal at fedoraproject.org> - 3.9.1-2
- stupid patch

* Fri Sep 25 2009 Seth Vidal <skvidal at fedoraproject.org> - 3.9.1-1
- 3.9.1

* Tue Aug 18 2009 Seth Vidal <skvidal at fedoraproject.org> - 3.9.0-8
- ssl options, http POST string type fixes

* Mon Aug 10 2009 Seth Vidal <skvidal at fedoraproject.org> - 3.9.0-6
- reget fixes, tmpfiles no longer made for urlopen() calls.

* Wed Aug 5 2009 Seth Vidal <skvidal at fedoraproject.org> - 3.9.0-5
- apply complete patch to head fixes: timeouts, regets, improves exception raising

* Tue Aug 4 2009 Seth Vidal <skvidal at fedoraproject.org> - 3.9.0-4
- timeout patch for https://bugzilla.redhat.com/show_bug.cgi?id=515497


* Thu Jul 30 2009 Seth Vidal <skvidal at fedoraproject.org> - 3.9.0-1
- new version - curl-based

* Wed Apr 8 2009 James Antill <james@fedoraproject.org> 3.0.0-15
- Fix progress bars for serial consoles.
- Make C-c behaviour a little nicer.

* Fri Mar 13 2009 Seth Vidal <skvidal at fedoraproject.org>
- kill deprecation warning from importing md5 if anyone uses keepalive

* Mon Mar 9 2009 Seth Vidal <skvidal at fedoraproject.org>
- apply patch for urlgrabber to properly check file:// urls with the checkfunc

* Thu Feb 26 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 3.0.0-12
- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild

* Fri Nov 28 2008 Ignacio Vazquez-Abrams <ivazqueznet+rpm@gmail.com> 3.0.0-11
- Rebuild for Python 2.6

* Wed Oct 14 2008 James Antill <james@fedoraproject.org> 3.0.0-10
- Have the progress bar have a small bar, for a virtual size doubling.

* Thu Jul 10 2008 James Antill <james@fedoraproject.org> 3.0.0-9
- Make urlgrabber usable if openssl is broken
- Relates: bug#454179

* Sun Jun 15 2008 James Antill <james@fedoraproject.org> 3.0.0-9
- Don't count partial downloads toward the total

* Sat May 18 2008 James Antill <james@fedoraproject.org> 3.0.0-8
- Tweak progress output so it's hopefully less confusing
- Add dynamic resizing ability to progress bar
- Resolves: bug#437197

* Fri May 2 2008 James Antill <james@fedoraproject.org> 3.0.0-7
- Fix reget's against servers that don't allow Range requests, also tweaks
- reget == check_timestamp, if anyone/thing uses that.
- Resolves: bug#435156
- Fix minor typo in progress for single instance.

* Mon Apr 7 2008 James Antill <james@fedoraproject.org> 3.0.0-6
- Fix the ftp byterange port problem:
- Resolves: bug#419241
- Fixup the progress UI:
- add function for total progress
- add total progress percentage to current download line
- add rate to current download line
- use dead space when finished downloading
- don't confuse download rate on regets.

* Sat Mar 15 2008 Robert Scheck <robert@fedoraproject.org> 3.0.0-5
- Make sure that *.egg-info is caught up during build

* Mon Dec 3 2007 Jeremy Katz <katzj@redhat.com> - 3.0.0-4
- Ensure fds are closed on exceptions (markmc, #404211)

* Wed Oct 10 2007 Jeremy Katz <katzj@redhat.com> - 3.0.0-3
- fix type checking of strings to also include unicode strings; fixes
regets from yum (#235618)

* Mon Aug 27 2007 Jeremy Katz <katzj@redhat.com> - 3.0.0-2
- fixes for package review (#226347)

* Thu May 31 2007 Jeremy Katz <katzj@redhat.com> - 3.0.0-1
- update to 3.0.0

* Wed Dec 6 2006 Jeremy Katz <katzj@redhat.com> - 2.9.9-5
- rebuild for python 2.5

* Wed Dec 6 2006 Jeremy Katz <katzj@redhat.com> - 2.9.9-4
- fix keepalive (#218268)

* Sat Nov 11 2006 Florian La Roche <laroche@redhat.com>
- add version/release to "Provides: urlgrabber"

* Mon Jul 17 2006 James Bowes <jbowes@redhat.com> - 2.9.9-2
- Add support for byte ranges and keepalive over HTTPS

* Wed Jul 12 2006 Jesse Keating <jkeating@redhat.com> - 2.9.9-1.1
- rebuild

* Tue May 16 2006 Jeremy Katz <katzj@redhat.com> - 2.9.9-1
- update to 2.9.9

* Tue Mar 14 2006 Jeremy Katz <katzj@redhat.com> - 2.9.8-2
- catch read errors so they trigger the failure callback. helps catch bad cds

* Wed Feb 22 2006 Jeremy Katz <katzj@redhat.com> - 2.9.8-1
- update to new version fixing progress bars in yum on regets

* Fri Dec 09 2005 Jesse Keating <jkeating@redhat.com>
- rebuilt

* Wed Sep 21 2005 Jeremy Katz <katzj@redhat.com> - 2.9.6-4
- don't use --record and list files by hand so that we don't miss
directories (#158480)

* Wed Sep 14 2005 Jeremy Katz <katzj@redhat.com> - 2.9.6-3
- add directory to file list (#168261)

* Fri Jun 03 2005 Phil Knirsch <pknirsch@redhat.com> 2.9.6-2
- Fixed the reget method to actually work correctly (skip completely transferred
files, etc)

* Tue Mar 8 2005 Jeremy Katz <katzj@redhat.com> - 2.9.6-1
- update to 2.9.6

* Mon Mar 7 2005 Jeremy Katz <katzj@redhat.com> - 2.9.5-1
- import into dist
- make the description less of a book

* Mon Mar 7 2005 Seth Vidal <skvidal@phy.duke.edu> 2.9.5-0
- 2.9.5

* Thu Feb 24 2005 Seth Vidal <skvidal@phy.duke.edu> 2.9.3-0
- first package for fc3
- named python-urlgrabber for naming guideline compliance
