Mock Version: 1.2.12 Mock Version: 1.2.12 ENTER do(['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --target noarch --nodeps /builddir/build/SPECS/python-beautifulsoup4.spec'], chrootPath='/var/lib/mock/f23-build-4143182-531291/root'shell=FalseprintOutput=Falseenv={'LANG': 'en_US.UTF-8', 'TERM': 'vt100', 'SHELL': '/bin/bash', 'PROMPT_COMMAND': 'printf "\x1b]0;\x07"', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'HOME': '/builddir', 'HOSTNAME': 'mock'}gid=425user='mockbuild'timeout=86400logger=uid=1000) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --target noarch --nodeps /builddir/build/SPECS/python-beautifulsoup4.spec'] with env {'LANG': 'en_US.UTF-8', 'TERM': 'vt100', 'SHELL': '/bin/bash', 'PROMPT_COMMAND': 'printf "\x1b]0;\x07"', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'HOME': '/builddir', 'HOSTNAME': 'mock'} and shell False sh: /usr/bin/python: No such file or directory sh: /usr/bin/python: No such file or directory warning: Could not canonicalize hostname: buildvm-21.phx2.fedoraproject.org Building target platforms: noarch Building for target noarch Wrote: /builddir/build/SRPMS/python-beautifulsoup4-4.4.1-1.fc23.src.rpm Child return code was: 0 LEAVE do --> ENTER do(['bash', '--login', '-c', '/usr/bin/rpmbuild -bb --target noarch --nodeps /builddir/build/SPECS/python-beautifulsoup4.spec '], chrootPath='/var/lib/mock/f23-build-4143182-531291/root'shell=FalseprintOutput=Falseenv={'LANG': 'en_US.UTF-8', 'TERM': 'vt100', 'SHELL': '/bin/bash', 'PROMPT_COMMAND': 'printf "\x1b]0;\x07"', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'HOME': '/builddir', 'HOSTNAME': 'mock'}gid=425user='mockbuild'timeout=86400logger=uid=1000) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bb --target noarch --nodeps /builddir/build/SPECS/python-beautifulsoup4.spec '] with env {'LANG': 'en_US.UTF-8', 'TERM': 'vt100', 'SHELL': '/bin/bash', 'PROMPT_COMMAND': 'printf "\x1b]0;\x07"', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'HOME': '/builddir', 'HOSTNAME': 'mock'} and shell False Building target platforms: noarch Building for target noarch Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.s11N2C + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf beautifulsoup4-4.4.1 + /usr/bin/gzip -dc /builddir/build/SOURCES/beautifulsoup4-4.4.1.tar.gz + /usr/bin/tar -xof - + STATUS=0 + '[' 0 -ne 0 ']' + cd beautifulsoup4-4.4.1 + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . + mv AUTHORS.txt AUTHORS.txt.iso + iconv -f ISO-8859-1 -t UTF-8 -o AUTHORS.txt AUTHORS.txt.iso + touch -r AUTHORS.txt.iso AUTHORS.txt + rm -rf /builddir/build/BUILD/python3-python-beautifulsoup4-4.4.1-1.fc23 + cp -a . 
/builddir/build/BUILD/python3-python-beautifulsoup4-4.4.1-1.fc23 + exit 0 Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.OWu7WR + umask 022 + cd /builddir/build/BUILD + cd beautifulsoup4-4.4.1 + /usr/bin/python setup.py build running build running build_py creating build creating build/lib creating build/lib/bs4 copying bs4/dammit.py -> build/lib/bs4 copying bs4/element.py -> build/lib/bs4 copying bs4/diagnose.py -> build/lib/bs4 copying bs4/testing.py -> build/lib/bs4 copying bs4/__init__.py -> build/lib/bs4 creating build/lib/bs4/builder copying bs4/builder/_html5lib.py -> build/lib/bs4/builder copying bs4/builder/_lxml.py -> build/lib/bs4/builder copying bs4/builder/_htmlparser.py -> build/lib/bs4/builder copying bs4/builder/__init__.py -> build/lib/bs4/builder creating build/lib/bs4/tests copying bs4/tests/test_soup.py -> build/lib/bs4/tests copying bs4/tests/test_builder_registry.py -> build/lib/bs4/tests copying bs4/tests/test_tree.py -> build/lib/bs4/tests copying bs4/tests/test_lxml.py -> build/lib/bs4/tests copying bs4/tests/test_html5lib.py -> build/lib/bs4/tests copying bs4/tests/__init__.py -> build/lib/bs4/tests copying bs4/tests/test_docs.py -> build/lib/bs4/tests copying bs4/tests/test_htmlparser.py -> build/lib/bs4/tests ~/build/BUILD/python3-python-beautifulsoup4-4.4.1-1.fc23 ~/build/BUILD/beautifulsoup4-4.4.1 + pushd /builddir/build/BUILD/python3-python-beautifulsoup4-4.4.1-1.fc23 + 2to3 --write --nobackups . RefactoringTool: Skipping implicit fixer: buffer RefactoringTool: Skipping implicit fixer: idioms RefactoringTool: Skipping implicit fixer: set_literal RefactoringTool: Skipping implicit fixer: ws_comma RefactoringTool: No changes to ./setup.py RefactoringTool: Refactored ./bs4/__init__.py --- ./bs4/__init__.py (original) +++ ./bs4/__init__.py (refactored) @@ -45,7 +45,7 @@ # The very first thing we do is give a useful error if someone is # running this code under Python 3 without converting it. -'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'<>'You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).' +'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'!='You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).' class BeautifulSoup(Tag): """ @@ -69,7 +69,7 @@ like HTML's
tag), call handle_starttag and then handle_endtag. """ - ROOT_TAG_NAME = u'[document]' + ROOT_TAG_NAME = '[document]' # If the end-user gives no indication which tree builder they # want, look for one with these features. @@ -138,13 +138,13 @@ "fromEncoding", "from_encoding") if len(kwargs) > 0: - arg = kwargs.keys().pop() + arg = list(kwargs.keys()).pop() raise TypeError( "__init__() got an unexpected keyword argument '%s'" % arg) if builder is None: original_features = features - if isinstance(features, basestring): + if isinstance(features, str): features = [features] if features is None or len(features) == 0: features = self.DEFAULT_BUILDER_FEATURES @@ -178,7 +178,7 @@ # involving passing non-markup to Beautiful Soup. # Beautiful Soup will still parse the input as markup, # just in case that's what the user really wants. - if (isinstance(markup, unicode) + if (isinstance(markup, str) and not os.path.supports_unicode_filenames): possible_filename = markup.encode("utf8") else: @@ -186,13 +186,13 @@ is_file = False try: is_file = os.path.exists(possible_filename) - except Exception, e: + except Exception as e: # This is almost certainly a problem involving # characters not valid in filenames on this # system. Just let it go. pass if is_file: - if isinstance(markup, unicode): + if isinstance(markup, str): markup = markup.encode("utf8") warnings.warn( '"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup) @@ -200,8 +200,8 @@ # TODO: This is ugly but I couldn't get it to work in # Python 3 otherwise. if ((isinstance(markup, bytes) and not b' ' in markup) - or (isinstance(markup, unicode) and not u' ' in markup)): - if isinstance(markup, unicode): + or (isinstance(markup, str) and not ' ' in markup)): + if isinstance(markup, str): markup = markup.encode("utf8") warnings.warn( '"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup) @@ -286,7 +286,7 @@ def endData(self, containerClass=NavigableString): if self.current_data: - current_data = u''.join(self.current_data) + current_data = ''.join(self.current_data) # If whitespace is not preserved, and this string contains # nothing but ASCII spaces, replace it with a single space # or newline. 
@@ -429,9 +429,9 @@ encoding_part = '' if eventual_encoding != None: encoding_part = ' encoding="%s"' % eventual_encodiRefactoringTool: Refactored ./bs4/dammit.py RefactoringTool: Refactored ./bs4/diagnose.py ng - prefix = u'\n' % encoding_part + prefix = '\n' % encoding_part else: - prefix = u'' + prefix = '' if not pretty_print: indent_level = None else: @@ -465,4 +465,4 @@ if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin) - print soup.prettify() + print(soup.prettify()) --- ./bs4/dammit.py (original) +++ ./bs4/dammit.py (refactored) @@ -10,7 +10,7 @@ from pdb import set_trace import codecs -from htmlentitydefs import codepoint2name +from html.entities import codepoint2name import re import logging import string @@ -58,7 +58,7 @@ reverse_lookup = {} characters_for_re = [] for codepoint, name in list(codepoint2name.items()): - character = unichr(codepoint) + character = chr(codepoint) if codepoint != 34: # There's no point in turning the quotation mark into # ", unless it happens within an attribute value, which @@ -273,7 +273,7 @@ def strip_byte_order_mark(cls, data): """If a byte-order mark is present, strip it and return the encoding it implies.""" encoding = None - if isinstance(data, unicode): + if isinstance(data, str): # Unicode data cannot have a byte-order mark. return data, encoding if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \ @@ -351,9 +351,9 @@ markup, override_encodings, is_html, exclude_encodings) # Short-circuit if the data is in Unicode to begin with. - if isinstance(markup, unicode) or markup == '': + if isinstance(markup, str) or markup == '': self.markup = markup - self.unicode_markup = unicode(markup) + self.unicode_markup = str(markup) self.original_encoding = None return @@ -436,7 +436,7 @@ def _to_unicode(self, data, encoding, errors="strict"): '''Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases''' - return unicode(data, encoding, errors) + return str(data, encoding, errors) @property def declared_html_encoding(self): --- ./bs4/diagnose.py (original) +++ ./bs4/diagnose.py (refactored) @@ -3,8 +3,8 @@ __license__ = "MIT" import cProfile -from StringIO import StringIO -from HTMLParser import HTMLParser +from io import StringIO +from html.parser import HTMLParser import bs4 from bs4 import BeautifulSoup, __version__ from bs4.builder import builder_registry @@ -20,8 +20,8 @@ def diagnose(data): """Diagnostic suite for isolating common problems.""" - print "Diagnostic running on Beautiful Soup %s" % __version__ - print "Python version %s" % sys.version + print("Diagnostic running on Beautiful Soup %s" % __version__) + print("Python version %s" % sys.version) basic_parsers = ["html.parser", "html5lib", "lxml"] for name in basic_parsers: @@ -30,16 +30,16 @@ break else: basic_parsers.remove(name) - print ( + print(( "I noticed that %s is not installed. Installing it may help." 
% - name) + name)) if 'lxml' in basic_parsers: basic_parsers.append(["lxml", "xml"]) try: from lxml import etree - print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)) - except ImportError, e: + print("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))) + except ImportError as e: print ( "lxml is not installed or couldn't be imported.") @@ -47,36 +47,36 @@ if 'html5lib' in basic_parsers: try: import html5lib - print "Found html5lib version %s" % html5lib.__version__ - except ImportError, e: + print("Found html5lib version %s" % html5lib.__verRefactoringTool: Refactored ./bs4/element.py sion__) + except ImportError as e: print ( "html5lib is not installed or couldn't be imported.") if hasattr(data, 'read'): data = data.read() elif os.path.exists(data): - print '"%s" looks like a filename. Reading data from the file.' % data + print('"%s" looks like a filename. Reading data from the file.' % data) data = open(data).read() elif data.startswith("http:") or data.startswith("https:"): - print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data - print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup." + print('"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data) + print("You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup.") return - print + print() for parser in basic_parsers: - print "Trying to parse your markup with %s" % parser + print("Trying to parse your markup with %s" % parser) success = False try: soup = BeautifulSoup(data, parser) success = True - except Exception, e: - print "%s could not parse the markup." % parser + except Exception as e: + print("%s could not parse the markup." % parser) traceback.print_exc() if success: - print "Here's what %s did with the markup:" % parser - print soup.prettify() - - print "-" * 80 + print("Here's what %s did with the markup:" % parser) + print(soup.prettify()) + + print("-" * 80) def lxml_trace(data, html=True, **kwargs): """Print out the lxml events that occur during parsing. @@ -86,7 +86,7 @@ """ from lxml import etree for event, element in etree.iterparse(StringIO(data), html=html, **kwargs): - print("%s, %4s, %s" % (event, element.tag, element.text)) + print(("%s, %4s, %s" % (event, element.tag, element.text))) class AnnouncingParser(HTMLParser): """Announces HTMLParser parse events, without doing anything else.""" @@ -168,9 +168,9 @@ def benchmark_parsers(num_elements=100000): """Very basic head-to-head performance benchmark.""" - print "Comparative parser benchmark on Beautiful Soup %s" % __version__ + print("Comparative parser benchmark on Beautiful Soup %s" % __version__) data = rdoc(num_elements) - print "Generated a large invalid HTML document (%d bytes)." % len(data) + print("Generated a large invalid HTML document (%d bytes)." % len(data)) for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: success = False @@ -179,24 +179,24 @@ soup = BeautifulSoup(data, parser) b = time.time() success = True - except Exception, e: - print "%s could not parse the markup." % parser + except Exception as e: + print("%s could not parse the markup." % parser) traceback.print_exc() if success: - print "BS4+%s parsed the markup in %.2fs." % (parser, b-a) + print("BS4+%s parsed the markup in %.2fs." % (parser, b-a)) from lxml import etree a = time.time() etree.HTML(data) b = time.time() - print "Raw lxml parsed the markup in %.2fs." 
% (b-a) + print("Raw lxml parsed the markup in %.2fs." % (b-a)) import html5lib parser = html5lib.HTMLParser() a = time.time() parser.parse(data) b = time.time() - print "Raw html5lib parsed the markup in %.2fs." % (b-a) + print("Raw html5lib parsed the markup in %.2fs." % (b-a)) def profile(num_elements=100000, parser="lxml"): --- ./bs4/element.py (original) +++ ./bs4/element.py (refactored) @@ -24,22 +24,22 @@ return alias -class NamespacedAttribute(unicode): +class NamespacedAttribute(str): def __new__(cls, prefix, name, namespace=None): if name is None: - obj = unicode.__new__(cls, prefix) + obj = str.__new__(cls, prefix) elif prefix is None: # Not really namespaced. - obj = unicode.__new__(cls, name) - else: - obj = unicode.__new__(cls, prefix + ":" + name) + obj = str.__new__(cls, name) + else: + obj = str.__new__(cls, prefix + ":" + name) obj.prefix = prefix obj.name = name obj.namespace = namespace return obj -class AttributeValueWithCharsetSubstitution(unicode): +class AttributeValueWithCharsetSubstitution(str): """A stand-in object for a character encoding specified in HTML.""" class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution): @@ -50,7 +50,7 @@ """ def __new__(cls, original_value): - obj = unicode.__new__(cls, original_value) + obj = str.__new__(cls, original_value) obj.original_value = original_value return obj @@ -73,9 +73,9 @@ match = cls.CHARSET_RE.search(original_value) if match is None: # No substitution necessary. - return unicode.__new__(unicode, original_value) - - obj = unicode.__new__(cls, original_value) + return str.__new__(str, original_value) + + obj = str.__new__(cls, original_value) obj.original_value = original_value return obj @@ -155,7 +155,7 @@ def format_string(self, s, formatter='minimal'): """Format the given string using the given formatter.""" - if not callable(formatter): + if not isinstance(formatter, collections.Callable): formatter = self._formatter_for_name(formatter) if formatter is None: output = s @@ -300,7 +300,7 @@ raise ValueError("Cannot insert None into a tag.") if new_child is self: raise ValueError("Cannot insert a tag into itself.") - if (isinstance(new_child, basestring) + if (isinstance(new_child, str) and not isinstance(new_child, NavigableString)): new_child = NavigableString(new_child) @@ -521,7 +521,7 @@ result = (element for element in generator if isinstance(element, Tag)) return ResultSet(strainer, result) - elif isinstance(name, basestring): + elif isinstance(name, str): # Optimization to find all tags with a given name. result = (element for element in generator if isinstance(element, Tag) @@ -672,7 +672,7 @@ return self.parents -class NavigableString(unicode, PageElement): +class NavigableString(str, PageElement): PREFIX = '' SUFFIX = '' @@ -685,10 +685,10 @@ passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ - if isinstance(value, unicode): - u = unicode.__new__(cls, value) - else: - u = unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) + if isinstance(value, str): + u = str.__new__(cls, value) + else: + u = str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) u.setup() return u @@ -699,7 +699,7 @@ return type(self)(self) def __getnewargs__(self): - return (unicode(self),) + return (str(self),) def __getattr__(self, attr): """text.string gives you text. 
This is for backwards @@ -739,23 +739,23 @@ class CData(PreformattedString): - PREFIX = u'' + PREFIX = '' class ProcessingInstruction(PreformattedString): - PREFIX = u'' + PREFIX = '' class Comment(PreformattedString): - PREFIX = u'' + PREFIX = '' class Declaration(PreformattedString): - PREFIX = u'' + PREFIX = '' class Doctype(PreformattedString): @@ -772,8 +772,8 @@ return Doctype(value) - PREFIX = u'\n' + PREFIX = '\n' class Tag(PageElement): @@ -896,7 +896,7 @@ for string in self._all_strings(True): yield string - def get_text(self, separator=u"", strip=False, + def get_text(self, separator="", strip=False, types=(NavigableString, CData)): """ Get all child strings, concatenated using the given separator. @@ -968,7 +968,7 @@ def __contains__(self, x): return x in self.contents - def __nonzero__(self): + def __bool__(self): "A tag is non-None even if it has no contents." return True @@ -1077,7 +1077,7 @@ # First off, turn a string formatter into a function. This # will stop the lookup from happening over and over again. - if not callable(formatter): + if not isinstance(formatter, collections.Callable): formatter = self._formatter_for_name(formatter) attrs = [] @@ -1088,8 +1088,8 @@ else: if isinstance(val, list) or isinstance(val, tuple): val = ' '.join(val) - elif not isinstance(val, basestring): - val = unicode(val) + elif not isinstance(val, str): + val = str(val) elif ( isinstance(val, AttributeValueWithCharsetSubstitution) and eventual_encoding is not None): @@ -1097,7 +1097,7 @@ text = self.format_string(val, formatter) decoded = ( - unicode(key) + '=' + str(key) + '=' + EntitySubstitution.quoted_attribute_value(text)) attrs.append(decoded) close = '' @@ -1181,7 +1181,7 @@ """ # First off, turn a string formatter into a function. This # will stop the lookup from happening over and over again. - if not callable(formatter): + if not isinstance(formatter, collections.Callable): formatter = self._formatter_for_name(formatter) pretty_print = (indent_level is not None) @@ -1314,7 +1314,7 @@ 'Final combinator "%s" is missing an argument.' % tokens[-1]) if self._select_debug: - print 'Running CSS selector "%s"' % selector + print('Running CSS selector "%s"' % selector) for index, token in enumerate(tokens): new_context = [] @@ -1323,11 +1323,11 @@ if tokens[index-1] in self._selector_combinators: # This token was consumed by the previous combinator. Skip it. if self._select_debug: - print ' Token was consumed by the previous combinator.' 
+ print(' Token was consumed by the previous combinator.') continue if self._select_debug: - print ' Considering token "%s"' % token + print(' Considering token "%s"' % token) recursive_candidate_generator = None tag_name = None @@ -1437,14 +1437,14 @@ next_token = tokens[index+1] def recursive_select(tag): if self._select_debug: - print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs) - print '-' * 40 + print(' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)) + print('-' * 40) for i in tag.select(next_token, recursive_candidate_generator): if self._select_debug: - print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs) + print('(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)) yield i if self._select_debug: - print '-' * 40 + print('-' * 40) _use_candidate_generator = recursive_select elif _candidate_generator is None: # By default, a tag's candidates are all of its @@ -1455,7 +1455,7 @@ check = "[any]" else: check = tag_name - print ' Default candidate generator, tag name="%s"' % check + print(' Default candidate generator, tag name="%s"' % check) if self._select_debug: # This is redundant with later code, but it stops # a bunch of bogus tags from cluttering up the @@ -1476,8 +1476,8 @@ count = 0 for tag in current_context: if self._select_debug: - print " Running candidate generator on %s %s" % ( - tag.name, repr(tag.attrs)) + print(" Running candidate generator on %s %s" % ( + tag.name, repr(tag.attrs))) for candidate in _use_candidate_generator(tag): if not isinstance(candidate, Tag): continue @@ -1492,7 +1492,7 @@ break if checker is None or result: if self._select_debug: - print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs)) + print(" SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))) if id(candidate) not in new_context_ids: # If a tag matches a selector more than once, # don't include it in the context more than once. @@ -1501,15 +1501,15 @@ if limit and len(new_context) >= limit: break elif self._select_debug: - print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs)) + print(" FAILURE %s %s" % (candidate.name, repr(candidate.attrs))) current_context = new_context if self._select_debug: - print "Final verdict:" + print("Final verdict:") for i in current_context: - print " %s %s" % (i.name, i.attrs) + print(" %s %s" % (i.name, i.attrs)) return current_context # Old names for backwards compatibility @@ -1553,7 +1553,7 @@ else: attrs = kwargs normalized_attrs = {} - for key, value in attrs.items(): + for key, value in list(attrs.items()): normalized_attrs[key] = self._normalize_search_value(value) self.attrs = normalized_attrs @@ -1562,7 +1562,7 @@ def _normalize_search_value(self, value): # Leave it alone if it's a Unicode string, a callable, a # regular expression, a boolean, or None. - if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match') + if (isinstance(value, str) or isinstance(value, collections.Callable) or hasattr(value, 'match') or isinstance(value, bool) or value is None): return value @@ -1575,7 +1575,7 @@ new_value = [] for v in value: if (hasattr(v, '__iter__') and not isinstance(v, bytes) - and not isinstance(v, unicode)): + and not isinstance(v, str)): # This is almost certainly the user's mistake. In the # interests of avoiding infinite loops, we'll let # it through as-is rather than doing a recursive call. @@ -1587,7 +1587,7 @@ # Otherwise, convert it into a Unicode string. 
# The unicode(str()) thing is so this will do the same thing on Python RefactoringTool: Refactored ./bs4/testing.py 2 # and Python 3. - return unicode(str(value)) + return str(str(value)) def __str__(self): if self.text: @@ -1641,7 +1641,7 @@ found = None # If given a list of items, scan it for a text element that # matches. - if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)): + if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): @@ -1654,7 +1654,7 @@ found = self.search_tag(markup) # If it's text, make sure the text matches. elif isinstance(markup, NavigableString) or \ - isinstance(markup, basestring): + isinstance(markup, str): if not self.name and not self.attrs and self._matches(markup, self.text): found = markup else: @@ -1668,7 +1668,7 @@ if isinstance(markup, list) or isinstance(markup, tuple): # This should only happen when searching a multi-valued attribute # like 'class'. - if (isinstance(match_against, unicode) + if (isinstance(match_against, str) and ' ' in match_against): # A bit of a special case. If they try to match "foo # bar" on a multivalue attribute's value, only accept @@ -1703,7 +1703,7 @@ # None matches None, False, an empty string, an empty list, and so on. return not match_against - if isinstance(match_against, unicode): + if isinstance(match_against, str): # Exact string match return markup == match_against --- ./bs4/testing.py (original) +++ ./bs4/testing.py (refactored) @@ -277,14 +277,14 @@ self.assertSoupEquals('', '') def test_entities_in_attributes_converted_to_unicode(self): - expect = u'

<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
+        expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
         self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect)
         self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect)
         self.assertSoupEquals('<p id="pi&#Xf1;ata"></p>', expect)
         self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect)
 
     def test_entities_in_text_converted_to_unicode(self):
-        expect = u'<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
+        expect = '<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
         self.assertSoupEquals("<p>pi&#241;ata</p>", expect)
         self.assertSoupEquals("<p>pi&#xf1;ata</p>", expect)
         self.assertSoupEquals("<p>pi&ntilde;ata</p>", expect)
@@ -295,7 +295,7 @@
             '<p>I said "good day!"</p>

') def test_out_of_range_entity(self): - expect = u"\N{REPLACEMENT CHARACTER}" + expect = "\N{REPLACEMENT CHARACTER}" self.assertSoupEquals("�", expect) self.assertSoupEquals("�", expect) self.assertSoupEquals("�", expect) @@ -366,9 +366,9 @@ # A seemingly innocuous document... but it's in Unicode! And # it contains characters that can't be represented in the # encoding found in the declaration! The horror! - markup = u'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' - soup = self.soup(markup) - self.assertEqual(u'Sacr\xe9 bleu!', soup.body.string) + markup = 'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' + soup = self.soup(markup) + self.assertEqual('Sacr\xe9 bleu!', soup.body.string) def test_soupstrainer(self): """Parsers should be able to work with SoupStrainers.""" @@ -408,7 +408,7 @@ # Both XML and HTML entities are converted to Unicode characters # during parsing. RefactoringTool: Refactored ./bs4/builder/__init__.py text = "

<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
-        expected = u"<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>"
+        expected = "<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>

" self.assertSoupEquals(text, expected) def test_smart_quotes_converted_on_the_way_in(self): @@ -418,15 +418,15 @@ soup = self.soup(quote) self.assertEqual( soup.p.string, - u"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}") + "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}") def test_non_breaking_spaces_converted_on_the_way_in(self): soup = self.soup("  ") - self.assertEqual(soup.a.string, u"\N{NO-BREAK SPACE}" * 2) + self.assertEqual(soup.a.string, "\N{NO-BREAK SPACE}" * 2) def test_entities_converted_on_the_way_out(self): text = "

<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
-        expected = u"<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>".encode("utf-8")
+        expected = "<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>

".encode("utf-8") soup = self.soup(text) self.assertEqual(soup.p.encode("utf-8"), expected) @@ -435,7 +435,7 @@ # easy-to-understand document. # Here it is in Unicode. Note that it claims to be in ISO-Latin-1. - unicode_html = u'

<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
+        unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>

' # That's because we're going to encode it into ISO-Latin-1, and use # that to test. @@ -588,15 +588,15 @@ self.assertTrue(b"< < hey > >" in encoded) def test_can_parse_unicode_document(self): - markup = u'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' - soup = self.soup(markup) - self.assertEqual(u'Sacr\xe9 bleu!', soup.root.string) + markup = 'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' + soup = self.soup(markup) + self.assertEqual('Sacr\xe9 bleu!', soup.root.string) def test_popping_namespaced_tag(self): markup = 'b2012-07-02T20:33:42Zcd' soup = self.soup(markup) self.assertEqual( - unicode(soup.rss), markup) + str(soup.rss), markup) def test_docstring_includes_correct_encoding(self): soup = self.soup("") @@ -627,17 +627,17 @@ def test_closing_namespaced_tag(self): markup = '

<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>

' soup = self.soup(markup) - self.assertEqual(unicode(soup.p), markup) + self.assertEqual(str(soup.p), markup) def test_namespaced_attributes(self): markup = '' soup = self.soup(markup) - self.assertEqual(unicode(soup.foo), markup) + self.assertEqual(str(soup.foo), markup) def test_namespaced_attributes_xml_namespace(self): markup = 'bar' soup = self.soup(markup) - self.assertEqual(unicode(soup.foo), markup) + self.assertEqual(str(soup.foo), markup) class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest): """Smoke test for a tree builder that supports HTML5.""" --- ./bs4/builder/__init__.py (original) +++ ./bs4/builder/__init__.py (refactored) @@ -156,13 +156,13 @@ universal = self.cdata_list_attributes.get('*', []) tRefactoringTool: Refactored ./bs4/builder/_html5lib.py RefactoringTool: Refactored ./bs4/builder/_htmlparser.py ag_specific = self.cdata_list_attributes.get( tag_name.lower(), None) - for attr in attrs.keys(): + for attr in list(attrs.keys()): if attr in universal or (tag_specific and attr in tag_specific): # We have a "class"-type attribute whose string # value is a whitespace-separated list of # values. Split it into a list. value = attrs[attr] - if isinstance(value, basestring): + if isinstance(value, str): values = whitespace_re.split(value) else: # html5lib sometimes calls setAttributes twice --- ./bs4/builder/_html5lib.py (original) +++ ./bs4/builder/_html5lib.py (refactored) @@ -50,7 +50,7 @@ doc = parser.parse(markup, encoding=self.user_specified_encoding) # Set the character encoding detected by the tokenizer. - if isinstance(markup, unicode): + if isinstance(markup, str): # We need to special-case this because html5lib sets # charEncoding to UTF-8 if it gets Unicode input. doc.original_encoding = None @@ -64,7 +64,7 @@ def test_fragment_to_document(self, fragment): """See `TreeBuilder`.""" - return u'%s' % fragment + return '%s' % fragment class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder): @@ -146,7 +146,7 @@ def appendChild(self, node): string_child = child = None - if isinstance(node, basestring): + if isinstance(node, str): # Some other piece of code decided to pass in a string # instead of creating a TextElement object to contain the # string. @@ -161,7 +161,7 @@ else: child = node.element - if not isinstance(child, basestring) and child.parent is not None: + if not isinstance(child, str) and child.parent is not None: node.element.extract() if (string_child and self.element.contents @@ -174,7 +174,7 @@ old_element.replace_with(new_element) self.soup._most_recent_element = new_element else: - if isinstance(node, basestring): + if isinstance(node, str): # Create a brand new NavigableString from this string. child = self.soup.new_string(node) @@ -212,7 +212,7 @@ self.soup.builder._replace_cdata_list_attribute_values( self.name, attributes) - for name, value in attributes.items(): + for name, value in list(attributes.items()): self.element[name] = value # The attributes may contain variables that need substitution. --- ./bs4/builder/_htmlparser.py (original) +++ ./bs4/builder/_htmlparser.py (refactored) @@ -4,11 +4,11 @@ 'HTMLParserTreeBuilder', ] -from HTMLParser import HTMLParser +from html.parser import HTMLParser try: - from HTMLParser import HTMLParseError -except ImportError, e: + from html.parser import HTMLParseError +except ImportError as e: # HTMLParseError is removed in Python 3.5. Since it can never be # thrown in 3.5, we can just define our own class as a placeholder. 
class HTMLParseError(Exception): @@ -79,9 +79,9 @@ real_name = int(name) try: - data = unichr(real_name) - except (ValueError, OverflowError), e: - data = u"\N{REPLACEMENT CHARACTER}" + data = chr(real_name) + except (ValueError, OverflowError) as e: + data = "\N{REPLACEMENT CHARACTER}" self.handle_data(data) @@ -145,7 +145,7 @@ declared within markup, whether any characters had to be replaced with REPLACEMENT CHARACTER). """ - if isinstance(markup, unicode): + if isinstance(markup, str): yield (markup, None, NonRefactoringTool: Refactored ./bs4/builder/_lxml.py e, False) return @@ -162,7 +162,7 @@ parser.soup = self.soup try: parser.feed(markup) - except HTMLParseError, e: + except HTMLParseError as e: warnings.warn(RuntimeWarning( "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help.")) raise e --- ./bs4/builder/_lxml.py (original) +++ ./bs4/builder/_lxml.py (refactored) @@ -4,7 +4,7 @@ ] from io import BytesIO -from StringIO import StringIO +from io import StringIO import collections from lxml import etree from bs4.element import ( @@ -87,12 +87,12 @@ Each 4-tuple represents a strategy for parsing the document. """ - if isinstance(markup, unicode): + if isinstance(markup, str): # We were given Unicode. Maybe lxml can parse Unicode on # this system? yield markup, None, document_declared_encoding, False - if isinstance(markup, unicode): + if isinstance(markup, str): # No, apparently not. Convert the Unicode to UTF-8 and # tell lxml to parse it as UTF-8. yield (markup.encode("utf8"), "utf8", @@ -112,7 +112,7 @@ def feed(self, markup): if isinstance(markup, bytes): markup = BytesIO(markup) - elif isinstance(markup, unicode): + elif isinstance(markup, str): markup = StringIO(markup) # Call feed() at least once, even if the markup is empty, @@ -127,7 +127,7 @@ if len(data) != 0: self.parser.feed(data) self.parser.close() - except (UnicodeDecodeError, LookupError, etree.ParserError), e: + except (UnicodeDecodeError, LookupError, etree.ParserError) as e: raise ParserRejectedMarkup(str(e)) def close(self): @@ -145,12 +145,12 @@ self.nsmaps.append(None) elif len(nsmap) > 0: # A new namespace mapping has come into play. - inverted_nsmap = dict((value, key) for key, value in nsmap.items()) + inverted_nsmap = dict((value, key) for key, value in list(nsmap.items())) self.nsmaps.append(inverted_nsmap) # Also treat the namespace mapping as a set of attributes on the # tag, so we can recreate it later. attrs = attrs.copy() - for prefix, namespace in nsmap.items(): + for prefix, namespace in list(nsmap.items()): attribute = NamespacedAttribute( "xmlns", prefix, "http://www.w3.org/2000/xmlns/") attrs[attribute] = namespace @@ -159,7 +159,7 @@ # from lxml with namespaces attached to their names, and # turn then into NamespacedAttribute objects. 
new_attrs = {} - for attr, value in attrs.items(): + for attr, value in list(attrs.items()): namespace, attr = self._getNsTag(attr) if namespace is None: new_attrs[attr] = value @@ -219,7 +219,7 @@ def test_fragment_to_document(self, fragment): """See `TreeBuilder`.""" - return u'\n%s' % fragment + return '\n%s' % fragment class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML): @@ -239,10 +239,10 @@ self.parser = self.parser_for(encoding) self.parser.feed(markup) self.parser.close() - except (UnicodeDecodeError, LookupError, etree.ParserError), e: + except (UnicodeDecodeError, LookupError, etree.ParserError) as e: raise ParserRejectedMarkup(str(e)) def test_fragment_to_document(self, fragment): """See `TreeBuilder`.""" - return u'%s' RefactoringTool: No changes to ./bs4/tests/__init__.py RefactoringTool: No changes to ./bs4/tests/test_builder_registry.py RefactoringTool: No changes to ./bs4/tests/test_docs.py RefactoringTool: Refactored ./bs4/tests/test_html5lib.py RefactoringTool: No changes to ./bs4/tests/test_htmlparser.py RefactoringTool: Refactored ./bs4/tests/test_lxml.py RefactoringTool: Refactored ./bs4/tests/test_soup.py WARNING: couldn't encode ./bs4/tests/test_soup.py's diff for your terminal RefactoringTool: Refactored ./bs4/tests/test_tree.py WARNING: couldn't encode ./bs4/tests/test_tree.py's diff for your terminal RefactoringTool: Refactored ./doc/source/conf.py % fragment + return '%s' % fragment --- ./bs4/tests/test_html5lib.py (original) +++ ./bs4/tests/test_html5lib.py (refactored) @@ -5,7 +5,7 @@ try: from bs4.builder import HTML5TreeBuilder HTML5LIB_PRESENT = True -except ImportError, e: +except ImportError as e: HTML5LIB_PRESENT = False from bs4.element import SoupStrainer from bs4.testing import ( @@ -74,14 +74,14 @@ def test_reparented_markup(self): markup = '

<p><em>foo</p>\n<p>bar<a></a></em></p>'
         soup = self.soup(markup)
-        self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode())
+        self.assertEqual("<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode())
         self.assertEqual(2, len(soup.find_all('p')))
 
     def test_reparented_markup_ends_with_whitespace(self):
         markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
         soup = self.soup(markup)
-        self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>\n", soup.body.decode())
+        self.assertEqual("<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>

\n", soup.body.decode()) self.assertEqual(2, len(soup.find_all('p'))) def test_processing_instruction(self): --- ./bs4/tests/test_lxml.py (original) +++ ./bs4/tests/test_lxml.py (refactored) @@ -7,7 +7,7 @@ import lxml.etree LXML_PRESENT = True LXML_VERSION = lxml.etree.LXML_VERSION -except ImportError, e: +except ImportError as e: LXML_PRESENT = False LXML_VERSION = (0,) @@ -62,7 +62,7 @@ # if one is installed. with warnings.catch_warnings(record=True) as w: soup = BeautifulStoneSoup("") - self.assertEqual(u"", unicode(soup.b)) + self.assertEqual("", str(soup.b)) self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message)) @skipIf( --- ./bs4/tests/test_soup.py (original) +++ ./bs4/tests/test_soup.py (refactored) @@ -32,7 +32,7 @@ try: from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML LXML_PRESENT = True -except ImportError, e: +except ImportError as e: LXML_PRESENT = False PYTHON_2_PRE_2_7 = (sys.version_info < (2,7)) @@ -41,17 +41,17 @@ class TestConstructor(SoupTest): def test_short_unicode_input(self): --- ./bs4/tests/test_tree.py (original) +++ ./bs4/tests/test_tree.py (refactored) @@ -70,13 +70,13 @@ self.assertEqual(soup.find("b").string, "2") def test_unicode_text_find(self): --- ./doc/source/conf.py (original) +++ ./doc/source/conf.py (refactored) @@ -40,8 +40,8 @@ master_doc = 'index' # General information about the project. -project = u'Beautiful Soup' -copyright = u'2004-2015, Leonard Richardson' +project = 'Beautiful Soup' +copyright = '2004-2015, Leonard Richardson' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -178,8 +178,8 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'BeautifulSoup.tex', u'Beautiful Soup Documentation', - u'Leonard Richardson', 'manual'), + ('index', 'BeautifulSoup.tex', 'Beautiful Soup Documentation', + 'Leonard Richardson', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -211,18 +211,18 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - ('index', 'beautifulsoup', u'Beautiful Soup Documentation', - [u'Leonard Richardson'], 1) + ('index', 'beautifulsoup', 'Beautiful Soup Documentation', + ['Leonard Richardson'], 1) ] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. -epub_title = u'Beautiful Soup' -epub_author = u'Leonard RichardsoRefactoringTool: Refactored ./doc.zh/source/conf.py RefactoringTool: Refactored ./scripts/demonstrate_parser_differences.py n' -epub_publisher = u'Leonard Richardson' -epub_copyright = u'2012, Leonard Richardson' +epub_title = 'Beautiful Soup' +epub_author = 'Leonard Richardson' +epub_publisher = 'Leonard Richardson' +epub_copyright = '2012, Leonard Richardson' # The language of the text. It defaults to the language option # or en if the language is not set. --- ./doc.zh/source/conf.py (original) +++ ./doc.zh/source/conf.py (refactored) @@ -40,8 +40,8 @@ master_doc = 'index' # General information about the project. 
-project = u'Beautiful Soup' -copyright = u'2012, Leonard Richardson' +project = 'Beautiful Soup' +copyright = '2012, Leonard Richardson' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -178,8 +178,8 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'BeautifulSoup.tex', u'Beautiful Soup Documentation', - u'Leonard Richardson', 'manual'), + ('index', 'BeautifulSoup.tex', 'Beautiful Soup Documentation', + 'Leonard Richardson', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -211,18 +211,18 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - ('index', 'beautifulsoup', u'Beautiful Soup Documentation', - [u'Leonard Richardson'], 1) + ('index', 'beautifulsoup', 'Beautiful Soup Documentation', + ['Leonard Richardson'], 1) ] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. -epub_title = u'Beautiful Soup' -epub_author = u'Leonard Richardson' -epub_publisher = u'Leonard Richardson' -epub_copyright = u'2012, Leonard Richardson' +epub_title = 'Beautiful Soup' +epub_author = 'Leonard Richardson' +epub_publisher = 'Leonard Richardson' +epub_copyright = '2012, Leonard Richardson' # The language of the text. It defaults to the language option # or en if the language is not set. --- ./scripts/demonstrate_parser_differences.py (original) +++ ./scripts/demonstrate_parser_differences.py (refactored) @@ -22,13 +22,13 @@ try: from bs4.builder import _lxml parsers.append('lxml') -except ImportError, e: +except ImportError as e: pass try: from bs4.builder import _html5lib parsers.append('html5lib') -except ImportError, e: +except ImportError as e: pass class Demonstration(object): @@ -47,7 +47,7 @@ output = soup.div else: output = soup - except Exception, e: + except Exception as e: output = "[EXCEPTION] %s" % str(e) self.results[parser] = output if previous_output is None: @@ -57,15 +57,15 @@ return uniform_results def dump(self): - print "%s: %s" % ("Markup".rjust(13), self.markup.encode("utf8")) - for parser, output in self.results.items(): - print "%s: %s" % (parser.rjust(13), output.encode("utf8")) + print("%s: %s" % ("Markup".rjust(13), self.markup.encode("utf8"))) + for parser, output in list(self.results.items()): + print("%s: %s" % (parser.rjust(13), output.encode("utf8"))) different_results = [] uniform_results = [] -print "= Testing the following parsers: %s =" % ", ".join(parsers) -print +print("= Testing the following parsers: %s =" % ", ".join(parsers)) +print() input_file = sys.stdin if sys.stdin.isatty(): @@ -83,13 +83,13 @@ else: different_results.append(demo) -print "== Markup that's handled the same in every parser ==" -print +print("== Markup that's handled the same in every parser ==") +print() for demo in uniform_results: demo.dump() - print -print "== Markup that's not handled the same in every parser ==" -print + print() +print("== Markup that's not handled the same in every parser ==") +print() for demo in different_results: demo.dump() - print + print() RefactoringTool: Files that were modified: RefactoringTool: ./setup.py RefactoringTool: ./bs4/__init__.py RefactoringTool: ./bs4/dammit.py RefactoringTool: ./bs4/diagnose.py RefactoringTool: ./bs4/element.py 
RefactoringTool: ./bs4/testing.py RefactoringTool: ./bs4/builder/__init__.py RefactoringTool: ./bs4/builder/_html5lib.py RefactoringTool: ./bs4/builder/_htmlparser.py RefactoringTool: ./bs4/builder/_lxml.py RefactoringTool: ./bs4/tests/__init__.py RefactoringTool: ./bs4/tests/test_builder_registry.py RefactoringTool: ./bs4/tests/test_docs.py RefactoringTool: ./bs4/tests/test_html5lib.py RefactoringTool: ./bs4/tests/test_htmlparser.py RefactoringTool: ./bs4/tests/test_lxml.py RefactoringTool: ./bs4/tests/test_soup.py RefactoringTool: ./bs4/tests/test_tree.py RefactoringTool: ./doc/source/conf.py RefactoringTool: ./doc.zh/source/conf.py RefactoringTool: ./scripts/demonstrate_parser_differences.py + /usr/bin/python3 setup.py build running build running build_py creating build creating build/lib creating build/lib/bs4 copying bs4/dammit.py -> build/lib/bs4 copying bs4/element.py -> build/lib/bs4 copying bs4/diagnose.py -> build/lib/bs4 copying bs4/testing.py -> build/lib/bs4 copying bs4/__init__.py -> build/lib/bs4 creating build/lib/bs4/builder copying bs4/builder/_html5lib.py -> build/lib/bs4/builder copying bs4/builder/_lxml.py -> build/lib/bs4/builder copying bs4/builder/_htmlparser.py -> build/lib/bs4/builder copying bs4/builder/__init__.py -> build/lib/bs4/builder creating build/lib/bs4/tests copying bs4/tests/test_soup.py -> build/lib/bs4/tests copying bs4/tests/test_builder_registry.py -> build/lib/bs4/tests copying bs4/tests/test_tree.py -> build/lib/bs4/tests copying bs4/tests/test_lxml.py -> build/lib/bs4/tests copying bs4/tests/test_html5lib.py -> build/lib/bs4/tests copying bs4/tests/__init__.py -> build/lib/bs4/tests copying bs4/tests/test_docs.py -> build/lib/bs4/tests copying bs4/tests/test_htmlparser.py -> build/lib/bs4/tests Fixing build/lib/bs4/dammit.py build/lib/bs4/element.py build/lib/bs4/diagnose.py build/lib/bs4/testing.py build/lib/bs4/__init__.py build/lib/bs4/builder/_html5lib.py build/lib/bs4/builder/_lxml.py build/lib/bs4/builder/_htmlparser.py build/lib/bs4/builder/__init__.py build/lib/bs4/tests/test_soup.py build/lib/bs4/tests/test_builder_registry.py build/lib/bs4/tests/test_tree.py build/lib/bs4/tests/test_lxml.py build/lib/bs4/tests/test_html5lib.py build/lib/bs4/tests/__init__.py build/lib/bs4/tests/test_docs.py build/lib/bs4/tests/test_htmlparser.py Skipping implicit fixer: buffer Skipping implicit fixer: idioms Skipping implicit fixer: set_literal Skipping implicit fixer: ws_comma Fixing build/lib/bs4/dammit.py build/lib/bs4/element.py build/lib/bs4/diagnose.py build/lib/bs4/testing.py build/lib/bs4/__init__.py build/lib/bs4/builder/_html5lib.py build/lib/bs4/builder/_lxml.py build/lib/bs4/builder/_htmlparser.py build/lib/bs4/builder/__init__.py build/lib/bs4/tests/test_soup.py build/lib/bs4/tests/test_builder_registry.py build/lib/bs4/tests/test_tree.py build/lib/bs4/tests/test_lxml.py build/lib/bs4/tests/test_html5lib.py build/lib/bs4/tests/__init__.py build/lib/bs4/tests/test_docs.py build/lib/bs4/tests/test_htmlparser.py Skipping implicit fixer: buffer Skipping implicit fixer: idioms Skipping implicit fixer: set_literal Skipping implicit fixer: ws_comma + exit 0 Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.FiS9YE + umask 022 + cd /builddir/build/BUILD + '[' /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch '!=' / ']' + rm -rf /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch ++ dirname /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch + mkdir -p /builddir/build/BUILDROOT + mkdir 
/builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch + cd beautifulsoup4-4.4.1 + /usr/bin/python setup.py install -O1 --skip-build --root /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch running install running install_lib creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7 creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4 copying build/lib/bs4/dammit.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4 copying build/lib/bs4/element.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4 copying build/lib/bs4/diagnose.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4 copying build/lib/bs4/testing.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4 copying build/lib/bs4/__init__.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4 creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/builder copying build/lib/bs4/builder/_html5lib.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/builder copying build/lib/bs4/builder/_lxml.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/builder copying build/lib/bs4/builder/_htmlparser.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/builder copying build/lib/bs4/builder/__init__.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/builder creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests copying build/lib/bs4/tests/test_soup.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests copying build/lib/bs4/tests/test_builder_registry.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests copying build/lib/bs4/tests/test_tree.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests copying build/lib/bs4/tests/test_lxml.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests copying build/lib/bs4/tests/test_html5lib.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests copying build/lib/bs4/tests/__init__.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests copying build/lib/bs4/tests/test_docs.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests copying build/lib/bs4/tests/test_htmlparser.py -> 
/builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/dammit.py to dammit.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/element.py to element.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/diagnose.py to diagnose.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/testing.py to testing.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/__init__.py to __init__.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/builder/_html5lib.py to _html5lib.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/builder/_lxml.py to _lxml.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/builder/_htmlparser.py to _htmlparser.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/builder/__init__.py to __init__.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests/test_soup.py to test_soup.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests/test_builder_registry.py to test_builder_registry.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests/test_tree.py to test_tree.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests/test_lxml.py to test_lxml.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests/test_html5lib.py to test_html5lib.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests/__init__.py to __init__.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests/test_docs.py to test_docs.pyc byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/bs4/tests/test_htmlparser.py to test_htmlparser.pyc writing byte-compilation script '/tmp/tmp3wMFqV.py' /usr/bin/python -O /tmp/tmp3wMFqV.py removing /tmp/tmp3wMFqV.py running install_egg_info running egg_info writing requirements to beautifulsoup4.egg-info/requires.txt writing beautifulsoup4.egg-info/PKG-INFO writing top-level names to beautifulsoup4.egg-info/top_level.txt writing dependency_links to beautifulsoup4.egg-info/dependency_links.txt reading manifest file 'beautifulsoup4.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' writing manifest file 'beautifulsoup4.egg-info/SOURCES.txt' Copying beautifulsoup4.egg-info to /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7/site-packages/beautifulsoup4-4.4.1-py2.7.egg-info running install_scripts + pushd 
/builddir/build/BUILD/python3-python-beautifulsoup4-4.4.1-1.fc23 ~/build/BUILD/python3-python-beautifulsoup4-4.4.1-1.fc23 ~/build/BUILD/beautifulsoup4-4.4.1 + /usr/bin/python3 setup.py install -O1 --skip-build --root /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch running install running install_lib creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4 creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4 copying build/lib/bs4/dammit.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4 copying build/lib/bs4/element.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4 copying build/lib/bs4/diagnose.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4 copying build/lib/bs4/testing.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4 copying build/lib/bs4/__init__.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4 creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/builder copying build/lib/bs4/builder/_html5lib.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/builder copying build/lib/bs4/builder/_lxml.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/builder copying build/lib/bs4/builder/_htmlparser.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/builder copying build/lib/bs4/builder/__init__.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/builder creating /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests copying build/lib/bs4/tests/test_soup.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests copying build/lib/bs4/tests/test_builder_registry.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests copying build/lib/bs4/tests/test_tree.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests copying build/lib/bs4/tests/test_lxml.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests copying build/lib/bs4/tests/test_html5lib.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests copying build/lib/bs4/tests/__init__.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests copying build/lib/bs4/tests/test_docs.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests copying build/lib/bs4/tests/test_htmlparser.py -> /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests byte-compiling 
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/dammit.py to dammit.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/element.py to element.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/diagnose.py to diagnose.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/testing.py to testing.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/__init__.py to __init__.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/builder/_html5lib.py to _html5lib.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/builder/_lxml.py to _lxml.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/builder/_htmlparser.py to _htmlparser.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/builder/__init__.py to __init__.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests/test_soup.py to test_soup.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests/test_builder_registry.py to test_builder_registry.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests/test_tree.py to test_tree.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests/test_lxml.py to test_lxml.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests/test_html5lib.py to test_html5lib.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests/__init__.py to __init__.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests/test_docs.py to test_docs.cpython-34.pyc
byte-compiling /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/bs4/tests/test_htmlparser.py to test_htmlparser.cpython-34.pyc
writing byte-compilation script '/tmp/tmpf5xia2j6.py'
/usr/bin/python3 -O /tmp/tmpf5xia2j6.py
removing /tmp/tmpf5xia2j6.py
running install_egg_info
running egg_info
writing top-level names to beautifulsoup4.egg-info/top_level.txt
writing dependency_links to beautifulsoup4.egg-info/dependency_links.txt
writing requirements to beautifulsoup4.egg-info/requires.txt
writing beautifulsoup4.egg-info/PKG-INFO
reading manifest file 'beautifulsoup4.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
writing manifest file 'beautifulsoup4.egg-info/SOURCES.txt'
Copying beautifulsoup4.egg-info to /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4/site-packages/beautifulsoup4-4.4.1-py3.4.egg-info
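The .cpython-34.pyc names come from PEP 3147 cache tagging: on Python 3 the bytecode is written into a __pycache__ directory with the interpreter tag in the file name, which is why the Python 2 and Python 3 payloads can share the same .py sources without colliding. The mapping from a source file to its cache path can be checked directly (a one-liner sketch; on a 3.4 interpreter it prints a .../__pycache__/dammit.cpython-34.pyc path, other versions print their own tag):

    # Show where this interpreter caches the bytecode for bs4/dammit.py (PEP 3147 layout).
    /usr/bin/python3 -c 'import importlib.util; print(importlib.util.cache_from_source("bs4/dammit.py"))'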
running install_scripts
+ /usr/lib/rpm/find-debuginfo.sh --strict-build-id -m --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 /builddir/build/BUILD/beautifulsoup4-4.4.1
/usr/lib/rpm/sepdebugcrcfix: Updated 0 CRC32s, 0 CRC32s did match.
find: 'debug': No such file or directory
+ /usr/lib/rpm/check-buildroot
+ /usr/lib/rpm/brp-compress
+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip
+ /usr/lib/rpm/brp-python-bytecompile /usr/bin/python 1
Bytecompiling .py files below /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python3.4 using /usr/bin/python3.4
Bytecompiling .py files below /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/lib/python2.7 using /usr/bin/python2.7
+ /usr/lib/rpm/brp-python-hardlink
+ /usr/lib/rpm/redhat/brp-java-repack-jars
Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.FNqRkw
+ umask 022
+ cd /builddir/build/BUILD
+ cd beautifulsoup4-4.4.1
+ /usr/bin/python -m unittest discover -s bs4
...........................................................................................................................................................................................................................................................................................................................................................................................................................................................
----------------------------------------------------------------------
Ran 443 tests in 0.668s

OK
+ pushd /builddir/build/BUILD/python3-python-beautifulsoup4-4.4.1-1.fc23
~/build/BUILD/python3-python-beautifulsoup4-4.4.1-1.fc23 ~/build/BUILD/beautifulsoup4-4.4.1
+ /usr/bin/python3 -m unittest discover -s bs4
...........................................................................................................................................................................................................................................................................................................................................................................................................................................................
----------------------------------------------------------------------
Ran 443 tests in 0.845s

OK
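The %check section runs the same unittest discovery against both installed trees, and all 443 tests pass under each interpreter. The equivalent invocations outside the buildsystem (from an unpacked beautifulsoup4-4.4.1 source tree for Python 2, and from a 2to3-converted copy for Python 3, as in the log) would be roughly:

    # Python 2 test run against the unconverted sources
    cd /builddir/build/BUILD/beautifulsoup4-4.4.1
    /usr/bin/python -m unittest discover -s bs4

    # Python 3 test run against the 2to3-converted copy
    cd /builddir/build/BUILD/python3-python-beautifulsoup4-4.4.1-1.fc23
    /usr/bin/python3 -m unittest discover -s bs4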
Processing files: python-beautifulsoup4-4.4.1-1.fc23.noarch
Executing(%doc): /bin/sh -e /var/tmp/rpm-tmp.eMJnhs
+ exit 0
+ umask 022
+ cd /builddir/build/BUILD
+ cd beautifulsoup4-4.4.1
+ DOCDIR=/builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/share/doc/python-beautifulsoup4
+ export DOCDIR
+ /usr/bin/mkdir -p /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/share/doc/python-beautifulsoup4
+ cp -pr AUTHORS.txt COPYING.txt NEWS.txt README.txt TODO.txt /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/share/doc/python-beautifulsoup4
+ exit 0
Provides: python-beautifulsoup4 = 4.4.1-1.fc23
Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PartialHardlinkSets) <= 4.0.4-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1
Requires: python(abi) = 2.7
Processing files: python3-beautifulsoup4-4.4.1-1.fc23.noarch
Executing(%doc): /bin/sh -e /var/tmp/rpm-tmp.F6Aygp
+ umask 022
+ cd /builddir/build/BUILD
+ cd beautifulsoup4-4.4.1
+ DOCDIR=/builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/share/doc/python3-beautifulsoup4
+ export DOCDIR
+ /usr/bin/mkdir -p /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/share/doc/python3-beautifulsoup4
+ cp -pr AUTHORS.txt COPYING.txt NEWS.txt README.txt TODO.txt /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch/usr/share/doc/python3-beautifulsoup4
+ exit 0
Provides: python3-beautifulsoup4 = 4.4.1-1.fc23
Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PartialHardlinkSets) <= 4.0.4-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1
Requires: python(abi) = 3.4
Obsoletes: python3-BeautifulSoup < 1:3.2.1-2
Checking for unpackaged file(s): /usr/lib/rpm/check-files /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch
warning: Could not canonicalize hostname: buildvm-21.phx2.fedoraproject.org
Wrote: /builddir/build/RPMS/python-beautifulsoup4-4.4.1-1.fc23.noarch.rpm
Wrote: /builddir/build/RPMS/python3-beautifulsoup4-4.4.1-1.fc23.noarch.rpm
Executing(%clean): /bin/sh -e /var/tmp/rpm-tmp.tTaNPn
+ umask 022
+ cd /builddir/build/BUILD
+ cd beautifulsoup4-4.4.1
+ /usr/bin/rm -rf /builddir/build/BUILDROOT/python-beautifulsoup4-4.4.1-1.fc23.noarch
+ exit 0
Child return code was: 0
LEAVE do -->
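The "Processing files" stage records the automatically generated python(abi) requirements for each subpackage, plus the Obsoletes on the old python3-BeautifulSoup name (presumably declared in the spec). The metadata written into the two binary packages can be verified afterwards with rpm's query options, for example (paths as in the log; run wherever the built RPMs end up):

    # Inspect the dependency metadata recorded in the built binary packages.
    rpm -qp --provides --requires --obsoletes \
        /builddir/build/RPMS/python-beautifulsoup4-4.4.1-1.fc23.noarch.rpm
    rpm -qp --provides --requires --obsoletes \
        /builddir/build/RPMS/python3-beautifulsoup4-4.4.1-1.fc23.noarch.rpm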