
Bug#943364: buster-pu: package python2.7/2.7.16-2+deb10u1



Package: release.debian.org
Severity: normal
Tags: buster
User: release.debian.org@packages.debian.org
Usertags: pu

This fixes a number of low-severity issues in python2.7 which have popped
up since the initial Buster release: lax cookie domain matching in
cookielib (CVE-2018-20852), URL separators introduced by NFKC
normalisation slipping through urlparse (CVE-2019-10160), mis-parsing of
email addresses containing multiple @ signs (CVE-2019-16056), unescaped
server titles in DocXMLRPCServer (CVE-2019-16935) and control-character/
CRLF injection via the URL path in httplib (CVE-2019-9740,
CVE-2019-9947). Debdiff below.

Cheers,
        Moritz

diff -u python2.7-2.7.16/debian/changelog python2.7-2.7.16/debian/changelog
--- python2.7-2.7.16/debian/changelog
+++ python2.7-2.7.16/debian/changelog
@@ -1,3 +1,14 @@
+python2.7 (2.7.16-2+deb10u1) buster; urgency=medium
+
+  * CVE-2018-20852
+  * CVE-2019-10160
+  * CVE-2019-16056 (Closes: #940901)
+  * CVE-2019-16935
+  * CVE-2019-9740
+  * CVE-2019-9947
+
+ -- Moritz Mühlenhoff <jmm@debian.org>  Fri, 11 Oct 2019 00:02:15 +0200
+
 python2.7 (2.7.16-2) unstable; urgency=high
 
   [ Matthias Klose ]
diff -u python2.7-2.7.16/debian/patches/series.in python2.7-2.7.16/debian/patches/series.in
--- python2.7-2.7.16/debian/patches/series.in
+++ python2.7-2.7.16/debian/patches/series.in
@@ -75,0 +76,5 @@
+CVE-2018-20852.diff
+CVE-2019-10160.diff
+CVE-2019-16056.diff
+CVE-2019-16935.diff
+CVE-2019-9740_CVE-2019-9947.diff
only in patch2:
unchanged:
--- python2.7-2.7.16.orig/debian/patches/CVE-2018-20852.diff
+++ python2.7-2.7.16/debian/patches/CVE-2018-20852.diff
@@ -0,0 +1,93 @@
+Commit 979daae300916adb399ab5b51410b6ebd0888f13 from the 2.7 branch
+
+diff -Naur python2.7-2.7.16.orig/Lib/cookielib.py python2.7-2.7.16/Lib/cookielib.py
+--- python2.7-2.7.16.orig/Lib/cookielib.py	2019-03-02 19:17:42.000000000 +0100
++++ python2.7-2.7.16/Lib/cookielib.py	2019-10-11 15:33:02.648671958 +0200
+@@ -1139,6 +1139,11 @@
+         req_host, erhn = eff_request_host(request)
+         domain = cookie.domain
+ 
++        if domain and not domain.startswith("."):
++            dotdomain = "." + domain
++        else:
++            dotdomain = domain
++
+         # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
+         if (cookie.version == 0 and
+             (self.strict_ns_domain & self.DomainStrictNonDomain) and
+@@ -1151,7 +1156,7 @@
+             _debug("   effective request-host name %s does not domain-match "
+                    "RFC 2965 cookie domain %s", erhn, domain)
+             return False
+-        if cookie.version == 0 and not ("."+erhn).endswith(domain):
++        if cookie.version == 0 and not ("."+erhn).endswith(dotdomain):
+             _debug("   request-host %s does not match Netscape cookie domain "
+                    "%s", req_host, domain)
+             return False
+@@ -1165,7 +1170,11 @@
+             req_host = "."+req_host
+         if not erhn.startswith("."):
+             erhn = "."+erhn
+-        if not (req_host.endswith(domain) or erhn.endswith(domain)):
++        if domain and not domain.startswith("."):
++            dotdomain = "." + domain
++        else:
++            dotdomain = domain
++        if not (req_host.endswith(dotdomain) or erhn.endswith(dotdomain)):
+             #_debug("   request domain %s does not match cookie domain %s",
+             #       req_host, domain)
+             return False
+diff -Naur python2.7-2.7.16.orig/Lib/test/test_cookielib.py python2.7-2.7.16/Lib/test/test_cookielib.py
+--- python2.7-2.7.16.orig/Lib/test/test_cookielib.py	2019-03-02 19:17:42.000000000 +0100
++++ python2.7-2.7.16/Lib/test/test_cookielib.py	2019-10-11 15:33:02.648671958 +0200
+@@ -368,6 +368,7 @@
+             ("http://foo.bar.com/";, ".foo.bar.com", True),
+             ("http://foo.bar.com/";, "foo.bar.com", True),
+             ("http://foo.bar.com/";, ".bar.com", True),
++            ("http://foo.bar.com/";, "bar.com", True),
+             ("http://foo.bar.com/";, "com", True),
+             ("http://foo.com/";, "rhubarb.foo.com", False),
+             ("http://foo.com/";, ".foo.com", True),
+@@ -378,6 +379,8 @@
+             ("http://foo/";, "foo", True),
+             ("http://foo/";, "foo.local", True),
+             ("http://foo/";, ".local", True),
++            ("http://barfoo.com";, ".foo.com", False),
++            ("http://barfoo.com";, "foo.com", False),
+             ]:
+             request = urllib2.Request(url)
+             r = pol.domain_return_ok(domain, request)
+@@ -938,6 +941,33 @@
+         c.add_cookie_header(req)
+         self.assertFalse(req.has_header("Cookie"))
+ 
++        c.clear()
++
++        pol.set_blocked_domains([])
++        req = Request("http://acme.com/";)
++        res = FakeResponse(headers, "http://acme.com/";)
++        cookies = c.make_cookies(res, req)
++        c.extract_cookies(res, req)
++        self.assertEqual(len(c), 1)
++
++        req = Request("http://acme.com/";)
++        c.add_cookie_header(req)
++        self.assertTrue(req.has_header("Cookie"))
++
++        req = Request("http://badacme.com/";)
++        c.add_cookie_header(req)
++        self.assertFalse(pol.return_ok(cookies[0], req))
++        self.assertFalse(req.has_header("Cookie"))
++
++        p = pol.set_blocked_domains(["acme.com"])
++        req = Request("http://acme.com/";)
++        c.add_cookie_header(req)
++        self.assertFalse(req.has_header("Cookie"))
++
++        req = Request("http://badacme.com/";)
++        c.add_cookie_header(req)
++        self.assertFalse(req.has_header("Cookie"))
++
+     def test_secure(self):
+         from cookielib import CookieJar, DefaultCookiePolicy
+ 
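
The cookielib change can be spot-checked by hand. A minimal sketch against
a patched interpreter (the hostnames are made up, mirroring the new test
cases):

    import cookielib, urllib2

    pol = cookielib.DefaultCookiePolicy()
    req = urllib2.Request("http://badacme.com/")
    # Unpatched, ".badacme.com".endswith("acme.com") is true, so cookies
    # set for acme.com would also be returned to badacme.com.
    print pol.domain_return_ok("acme.com", req)  # patched: False
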
only in patch2:
unchanged:
--- python2.7-2.7.16.orig/debian/patches/CVE-2019-10160.diff
+++ python2.7-2.7.16/debian/patches/CVE-2019-10160.diff
@@ -0,0 +1,71 @@
+This patch consists of the following commits from the 2.7 branch:
+98a4dcefbbc3bce5ab07e7c0830a183157250259
+61599b050c621386a3fc6bc480359e2d3bb93de
+2b578479b96aa3deeeb8bac313a02b5cf3cb1aff
+507bd8cde60ced74d13a1ffa883bb9b0e73c38be (not part of security fix, but dependent)
+
+diff -Naur python2.7-2.7.16.orig/Lib/test/test_urlparse.py python2.7-2.7.16/Lib/test/test_urlparse.py
+--- python2.7-2.7.16.orig/Lib/test/test_urlparse.py	2019-10-09 17:52:19.875053907 +0200
++++ python2.7-2.7.16/Lib/test/test_urlparse.py	2019-10-09 17:55:02.936834540 +0200
+@@ -641,12 +641,29 @@
+         self.assertIn(u'\u2100', denorm_chars)
+         self.assertIn(u'\uFF03', denorm_chars)
+ 
++        # bpo-36742: Verify port separators are ignored when they
++        # existed prior to decomposition
++        urlparse.urlsplit(u'http://\u30d5\u309a:80')
++        with self.assertRaises(ValueError):
++            urlparse.urlsplit(u'http://\u30d5\u309a\ufe1380')
++
+         for scheme in [u"http", u"https", u"ftp"]:
+-            for c in denorm_chars:
+-                url = u"{}://netloc{}false.netloc/path".format(scheme, c)
+-                print "Checking %r" % url
+-                with self.assertRaises(ValueError):
+-                    urlparse.urlsplit(url)
++            for netloc in [u"netloc{}false.netloc", u"n{}user@netloc"]:
++                for c in denorm_chars:
++                    url = u"{}://{}/path".format(scheme, netloc.format(c))
++                    if test_support.verbose:
++                        print "Checking %r" % url
++                    with self.assertRaises(ValueError):
++                        urlparse.urlsplit(url)
++
++        # check error message: invalid netloc must be formatted with repr()
++        # to get an ASCII error message
++        with self.assertRaises(ValueError) as cm:
++            urlparse.urlsplit(u'http://example.com\uFF03@bing.com')
++        self.assertEqual(str(cm.exception),
++                         "netloc u'example.com\\uff03@bing.com' contains invalid characters "
++                         "under NFKC normalization")
++        self.assertIsInstance(cm.exception.args[0], str)
+ 
+ def test_main():
+     test_support.run_unittest(UrlParseTestCase)
+diff -Naur python2.7-2.7.16.orig/Lib/urlparse.py python2.7-2.7.16/Lib/urlparse.py
+--- python2.7-2.7.16.orig/Lib/urlparse.py	2019-10-09 17:52:19.875053907 +0200
++++ python2.7-2.7.16/Lib/urlparse.py	2019-10-09 17:55:02.936834540 +0200
+@@ -171,14 +171,18 @@
+     # looking for characters like \u2100 that expand to 'a/c'
+     # IDNA uses NFKC equivalence, so normalize for this check
+     import unicodedata
+-    netloc2 = unicodedata.normalize('NFKC', netloc)
+-    if netloc == netloc2:
++    n = netloc.replace(u'@', u'') # ignore characters already included
++    n = n.replace(u':', u'')      # but not the surrounding text
++    n = n.replace(u'#', u'')
++    n = n.replace(u'?', u'')
++    netloc2 = unicodedata.normalize('NFKC', n)
++    if n == netloc2:
+         return
+-    _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+     for c in '/?#@:':
+         if c in netloc2:
+-            raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+-                             "characters under NFKC normalization")
++            raise ValueError("netloc %r contains invalid characters "
++                             "under NFKC normalization"
++                             % netloc)
+ 
+ def urlsplit(url, scheme='', allow_fragments=True):
+     """Parse a URL into 5 components:
only in patch2:
unchanged:
--- python2.7-2.7.16.orig/debian/patches/CVE-2019-16056.diff
+++ python2.7-2.7.16/debian/patches/CVE-2019-16056.diff
@@ -0,0 +1,54 @@
+Commit 4cbcd2f8c4e12b912e4d21fd892eedf7a3813d8e from the 2.7 branch
+
+diff -Naur python2.7-2.7.16.orig/Lib/email/_parseaddr.py python2.7-2.7.16/Lib/email/_parseaddr.py
+--- python2.7-2.7.16.orig/Lib/email/_parseaddr.py	2019-03-02 19:17:42.000000000 +0100
++++ python2.7-2.7.16/Lib/email/_parseaddr.py	2019-10-11 15:37:54.298250288 +0200
+@@ -336,7 +336,12 @@
+         aslist.append('@')
+         self.pos += 1
+         self.gotonext()
+-        return EMPTYSTRING.join(aslist) + self.getdomain()
++        domain = self.getdomain()
++        if not domain:
++            # Invalid domain, return an empty address instead of returning a
++            # local part to denote failed parsing.
++            return EMPTYSTRING
++        return EMPTYSTRING.join(aslist) + domain
+ 
+     def getdomain(self):
+         """Get the complete domain name from an address."""
+@@ -351,6 +356,10 @@
+             elif self.field[self.pos] == '.':
+                 self.pos += 1
+                 sdlist.append('.')
++            elif self.field[self.pos] == '@':
++                # bpo-34155: Don't parse domains with two `@` like
++                # `a@malicious.org@important.com`.
++                return EMPTYSTRING
+             elif self.field[self.pos] in self.atomends:
+                 break
+             else:
+diff -Naur python2.7-2.7.16.orig/Lib/email/test/test_email.py python2.7-2.7.16/Lib/email/test/test_email.py
+--- python2.7-2.7.16.orig/Lib/email/test/test_email.py	2019-03-02 19:17:42.000000000 +0100
++++ python2.7-2.7.16/Lib/email/test/test_email.py	2019-10-11 15:37:54.298250288 +0200
+@@ -2306,6 +2306,20 @@
+         self.assertEqual(Utils.parseaddr('<>'), ('', ''))
+         self.assertEqual(Utils.formataddr(Utils.parseaddr('<>')), '')
+ 
++    def test_parseaddr_multiple_domains(self):
++        self.assertEqual(
++            Utils.parseaddr('a@b@c'),
++            ('', '')
++        )
++        self.assertEqual(
++            Utils.parseaddr('a@b.c@c'),
++            ('', '')
++        )
++        self.assertEqual(
++            Utils.parseaddr('a@172.17.0.1@c'),
++            ('', '')
++        )
++
+     def test_noquote_dump(self):
+         self.assertEqual(
+             Utils.formataddr(('A Silly Person', 'person@dom.ain')),
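
A quick way to check the _parseaddr change, using the address from the
bpo-34155 report:

    from email.utils import parseaddr

    # Unpatched, this returned ('', 'a@malicious.org'), so callers
    # validating the domain saw the attacker-controlled half.
    print parseaddr('a@malicious.org@important.com')  # patched: ('', '')
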
only in patch2:
unchanged:
--- python2.7-2.7.16.orig/debian/patches/CVE-2019-16935.diff
+++ python2.7-2.7.16/debian/patches/CVE-2019-16935.diff
@@ -0,0 +1,68 @@
+Commit 8eb64155ff26823542ccf0225b3d57b6ae36ea89 from the 2.7 branch
+
+diff -Naur python2.7-2.7.16.orig/Lib/DocXMLRPCServer.py python2.7-2.7.16/Lib/DocXMLRPCServer.py
+--- python2.7-2.7.16.orig/Lib/DocXMLRPCServer.py	2019-03-02 19:17:42.000000000 +0100
++++ python2.7-2.7.16/Lib/DocXMLRPCServer.py	2019-10-10 13:09:04.403495695 +0200
+@@ -20,6 +20,16 @@
+             CGIXMLRPCRequestHandler,
+             resolve_dotted_attribute)
+ 
++
++def _html_escape_quote(s):
++    s = s.replace("&", "&amp;") # Must be done first!
++    s = s.replace("<", "&lt;")
++    s = s.replace(">", "&gt;")
++    s = s.replace('"', "&quot;")
++    s = s.replace('\'', "&#x27;")
++    return s
++
++
+ class ServerHTMLDoc(pydoc.HTMLDoc):
+     """Class used to generate pydoc HTML document for a server"""
+ 
+@@ -210,7 +220,8 @@
+                                 methods
+                             )
+ 
+-        return documenter.page(self.server_title, documentation)
++        title = _html_escape_quote(self.server_title)
++        return documenter.page(title, documentation)
+ 
+ class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
+     """XML-RPC and documentation request handler class.
+diff -Naur python2.7-2.7.16.orig/Lib/test/test_docxmlrpc.py python2.7-2.7.16/Lib/test/test_docxmlrpc.py
+--- python2.7-2.7.16.orig/Lib/test/test_docxmlrpc.py	2019-03-02 19:17:42.000000000 +0100
++++ python2.7-2.7.16/Lib/test/test_docxmlrpc.py	2019-10-10 13:09:04.403495695 +0200
+@@ -1,5 +1,6 @@
+ from DocXMLRPCServer import DocXMLRPCServer
+ import httplib
++import re
+ import sys
+ from test import test_support
+ threading = test_support.import_module('threading')
+@@ -176,6 +177,25 @@
+         self.assertIn("""Try&nbsp;self.<strong>add</strong>,&nbsp;too.""",
+                       response.read())
+ 
++    def test_server_title_escape(self):
++        """Test that the server title and documentation
++        are escaped for HTML.
++        """
++        self.serv.set_server_title('test_title<script>')
++        self.serv.set_server_documentation('test_documentation<script>')
++        self.assertEqual('test_title<script>', self.serv.server_title)
++        self.assertEqual('test_documentation<script>',
++                self.serv.server_documentation)
++
++        generated = self.serv.generate_html_documentation()
++        title = re.search(r'<title>(.+?)</title>', generated).group()
++        documentation = re.search(r'<p><tt>(.+?)</tt></p>', generated).group()
++        self.assertEqual('<title>Python: test_title&lt;script&gt;</title>',
++                title)
++        self.assertEqual('<p><tt>test_documentation&lt;script&gt;</tt></p>',
++                documentation)
++
++
+ def test_main():
+     test_support.run_unittest(DocXMLRPCHTTPGETServer)
+ 
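
The DocXMLRPCServer escaping can be verified without any HTTP traffic,
since generate_html_documentation() is callable directly; a minimal
sketch:

    from DocXMLRPCServer import DocXMLRPCServer

    serv = DocXMLRPCServer(('localhost', 0), logRequests=False)
    try:
        serv.set_server_title('test_title<script>')
        html = serv.generate_html_documentation()
        print '<script>' in html        # patched: False
        print '&lt;script&gt;' in html  # patched: True
    finally:
        serv.server_close()
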
only in patch2:
unchanged:
--- python2.7-2.7.16.orig/debian/patches/CVE-2019-9740_CVE-2019-9947.diff
+++ python2.7-2.7.16/debian/patches/CVE-2019-9740_CVE-2019-9947.diff
@@ -0,0 +1,163 @@
+Patch bb8071a4cae5ab3fe321481dd3d73662ffb26052 from the 2.7 branch
+
+diff -Naur python2.7-2.7.16.orig/Lib/httplib.py python2.7-2.7.16/Lib/httplib.py
+--- python2.7-2.7.16.orig/Lib/httplib.py	2019-03-02 19:17:42.000000000 +0100
++++ python2.7-2.7.16/Lib/httplib.py	2019-10-09 18:10:09.780991094 +0200
+@@ -247,6 +247,16 @@
+ _is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match
+ _is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search
+ 
++# These characters are not allowed within HTTP URL paths.
++#  See https://tools.ietf.org/html/rfc3986#section-3.3 and the
++#  https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
++# Prevents CVE-2019-9740.  Includes control characters such as \r\n.
++# Restrict non-ASCII characters above \x7f (0x80-0xff).
++_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f-\xff]')
++# Arguably only these _should_ allowed:
++#  _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
++# We are more lenient for assumed real world compatibility purposes.
++
+ # We always set the Content-Length header for these methods because some
+ # servers will otherwise respond with a 411
+ _METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
+@@ -927,6 +937,12 @@
+         self._method = method
+         if not url:
+             url = '/'
++        # Prevent CVE-2019-9740.
++        match = _contains_disallowed_url_pchar_re.search(url)
++        if match:
++            raise InvalidURL("URL can't contain control characters. %r "
++                             "(found at least %r)"
++                             % (url, match.group()))
+         hdr = '%s %s %s' % (method, url, self._http_vsn_str)
+ 
+         self._output(hdr)
+diff -Naur python2.7-2.7.16.orig/Lib/test/test_urllib2.py python2.7-2.7.16/Lib/test/test_urllib2.py
+--- python2.7-2.7.16.orig/Lib/test/test_urllib2.py	2019-03-02 19:17:42.000000000 +0100
++++ python2.7-2.7.16/Lib/test/test_urllib2.py	2019-10-09 18:10:09.780991094 +0200
+@@ -15,6 +15,9 @@
+ except ImportError:
+     ssl = None
+ 
++from test.test_urllib import FakeHTTPMixin
++
++
+ # XXX
+ # Request
+ # CacheFTPHandler (hard to write)
+@@ -1262,7 +1265,7 @@
+         self.assertEqual(len(http_handler.requests), 1)
+         self.assertFalse(http_handler.requests[0].has_header(auth_header))
+ 
+-class MiscTests(unittest.TestCase):
++class MiscTests(unittest.TestCase, FakeHTTPMixin):
+ 
+     def test_build_opener(self):
+         class MyHTTPHandler(urllib2.HTTPHandler): pass
+@@ -1317,6 +1320,52 @@
+             "Unsupported digest authentication algorithm 'invalid'"
+         )
+ 
++    @unittest.skipUnless(ssl, "ssl module required")
++    def test_url_with_control_char_rejected(self):
++        for char_no in range(0, 0x21) + range(0x7f, 0x100):
++            char = chr(char_no)
++            schemeless_url = "//localhost:7777/test%s/" % char
++            self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
++            try:
++                # We explicitly test urllib.request.urlopen() instead of the top
++                # level 'def urlopen()' function defined in this... (quite ugly)
++                # test suite.  They use different url opening codepaths.  Plain
++                # urlopen uses FancyURLOpener which goes via a codepath that
++                # calls urllib.parse.quote() on the URL which makes all of the
++                # above attempts at injection within the url _path_ safe.
++                escaped_char_repr = repr(char).replace('\\', r'\\')
++                InvalidURL = httplib.InvalidURL
++                with self.assertRaisesRegexp(
++                    InvalidURL, "contain control.*" + escaped_char_repr):
++                    urllib2.urlopen("http:" + schemeless_url)
++                with self.assertRaisesRegexp(
++                    InvalidURL, "contain control.*" + escaped_char_repr):
++                    urllib2.urlopen("https:" + schemeless_url)
++            finally:
++                self.unfakehttp()
++
++    @unittest.skipUnless(ssl, "ssl module required")
++    def test_url_with_newline_header_injection_rejected(self):
++        self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
++        host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
++        schemeless_url = "//" + host + ":8080/test/?test=a"
++        try:
++            # We explicitly test urllib2.urlopen() instead of the top
++            # level 'def urlopen()' function defined in this... (quite ugly)
++            # test suite.  They use different url opening codepaths.  Plain
++            # urlopen uses FancyURLOpener which goes via a codepath that
++            # calls urllib.parse.quote() on the URL which makes all of the
++            # above attempts at injection within the url _path_ safe.
++            InvalidURL = httplib.InvalidURL
++            with self.assertRaisesRegexp(
++                InvalidURL, r"contain control.*\\r.*(found at least . .)"):
++                urllib2.urlopen("http:" + schemeless_url)
++            with self.assertRaisesRegexp(InvalidURL, r"contain control.*\\n"):
++                urllib2.urlopen("https:" + schemeless_url)
++        finally:
++            self.unfakehttp()
++
++
+ 
+ class RequestTests(unittest.TestCase):
+ 
+diff -Naur python2.7-2.7.16.orig/Lib/test/test_urllib.py python2.7-2.7.16/Lib/test/test_urllib.py
+--- python2.7-2.7.16.orig/Lib/test/test_urllib.py	2019-10-09 17:52:19.887053741 +0200
++++ python2.7-2.7.16/Lib/test/test_urllib.py	2019-10-09 18:10:09.780991094 +0200
+@@ -257,6 +257,31 @@
+         finally:
+             self.unfakehttp()
+ 
++    def test_url_with_control_char_rejected(self):
++        for char_no in range(0, 0x21) + range(0x7f, 0x100):
++            char = chr(char_no)
++            schemeless_url = "//localhost:7777/test%s/" % char
++            self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
++            try:
++                # urllib quotes the URL so there is no injection.
++                resp = urllib.urlopen("http:" + schemeless_url)
++                self.assertNotIn(char, resp.geturl())
++            finally:
++                self.unfakehttp()
++
++    def test_url_with_newline_header_injection_rejected(self):
++        self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
++        host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
++        schemeless_url = "//" + host + ":8080/test/?test=a"
++        try:
++            # urllib quotes the URL so there is no injection.
++            resp = urllib.urlopen("http:" + schemeless_url)
++            self.assertNotIn(' ', resp.geturl())
++            self.assertNotIn('\r', resp.geturl())
++            self.assertNotIn('\n', resp.geturl())
++        finally:
++            self.unfakehttp()
++
+     def test_read_bogus(self):
+         # urlopen() should raise IOError for many error codes.
+         self.fakehttp('''HTTP/1.1 401 Authentication Required
+diff -Naur python2.7-2.7.16.orig/Lib/test/test_xmlrpc.py python2.7-2.7.16/Lib/test/test_xmlrpc.py
+--- python2.7-2.7.16.orig/Lib/test/test_xmlrpc.py	2019-03-02 19:17:42.000000000 +0100
++++ python2.7-2.7.16/Lib/test/test_xmlrpc.py	2019-10-09 18:10:09.780991094 +0200
+@@ -659,7 +659,13 @@
+     def test_partial_post(self):
+         # Check that a partial POST doesn't make the server loop: issue #14001.
+         conn = httplib.HTTPConnection(ADDR, PORT)
+-        conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
++        conn.send('POST /RPC2 HTTP/1.0\r\n'
++                  'Content-Length: 100\r\n\r\n'
++                  'bye HTTP/1.1\r\n'
++                  'Host: %s:%s\r\n'
++                  'Accept-Encoding: identity\r\n'
++                  'Content-Length: 0\r\n\r\n'
++                  % (ADDR, PORT))
+         conn.close()
+ 
+ class SimpleServerEncodingTestCase(BaseServerTestCase):
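
The httplib check fires before anything is written to the socket, so the
header-injection case can be tested without a listener; a minimal sketch:

    import httplib

    conn = httplib.HTTPConnection('localhost', 7777)  # never connects
    try:
        # CRLF in the request path used to go verbatim into the request
        # line; putrequest() now raises InvalidURL instead.
        conn.putrequest('GET', '/test HTTP/1.1\r\nX-injected: header')
    except httplib.InvalidURL as e:
        print e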
