use six.moves to handle the urllib / urllib2 / urlparse → urllib.* renaming from the Python 2-to-3 cleanup
authorDiane Trout <diane@ghic.org>
Thu, 29 Jan 2015 21:39:32 +0000 (13:39 -0800)
committerDiane Trout <diane@ghic.org>
Thu, 29 Jan 2015 21:39:32 +0000 (13:39 -0800)
12 files changed:
encode_submission/encode3.py
encode_submission/encode_find.py
encode_submission/geo_gather.py
encode_submission/ucsc_gather.py
htsworkflow/automation/copier.py
htsworkflow/pipelines/sequences.py
htsworkflow/submission/condorfastq.py
htsworkflow/submission/daf.py
htsworkflow/submission/encoded.py
htsworkflow/submission/ucsc.py
htsworkflow/util/rdfhelp.py
scripts/htsw-record-runfolder

index 29352d5a4d6ad561b3148a39310c8ec580ff956b..82e0401215e5fc3bb85cf25ee8b3b9a5159c9999 100644 (file)
@@ -18,9 +18,6 @@ import stat
 import sys
 import time
 import types
-import urllib
-import urllib2
-import urlparse
 from zipfile import ZipFile
 
 import RDF
index 870a585b3e5f6f12ba4b46bd64cec076d8d79066..dd02f31befe84f3d5df1f1cb4d2c3704a25c0030 100644 (file)
@@ -18,8 +18,7 @@ import re
 # redland rdf lib
 import RDF
 import sys
-import urllib
-import urlparse
+from six.moves import urllib
 
 if not 'DJANGO_SETTINGS_MODULE' in os.environ:
     os.environ['DJANGO_SETTINGS_MODULE'] = 'htsworkflow.settings'
@@ -483,7 +482,7 @@ def reload_libraries(model, library_list):
         load_library_detail(model, library_urn)
 
 def user_library_id_to_library_urn(library_id):
-    split_url = urlparse.urlsplit(library_id)
+    split_url = urllib.parse.urlsplit(library_id)
     if len(split_url.scheme) == 0:
         return LIBRARY_NS[library_id]
     else:
@@ -645,7 +644,7 @@ def login(cookie=None):
     response, content = http.request(LOGIN_URL,
                                      'POST',
                                      headers=headers,
-                                     body=urllib.urlencode(credentials))
+                                     body=urllib.parse.urlencode(credentials))
     LOGGER.debug("Login to {0}, status {1}".format(LOGIN_URL,
                                                     response['status']))
 
index 53e2d4441663f8088ad9b96a248c1600abb01da0..46d7635d2f8e319fe12b9e49b090b66a741c47db 100644 (file)
@@ -16,9 +16,6 @@ import stat
 import sys
 import time
 import types
-import urllib
-import urllib2
-import urlparse
 from zipfile import ZipFile
 
 import RDF
index 6961fa9bdf30818512f78815223fcc8a9137c6ce..4fa816477c207ced4919b309975ac97d3047df61 100644 (file)
@@ -16,9 +16,6 @@ import stat
 import sys
 import time
 import types
-import urllib
-import urllib2
-import urlparse
 from zipfile import ZipFile
 
 import RDF
index 1dc719d96b0c0bdfc851e0ef3e56e162b00e77bc..1ba0ab26c8956084498508d19f99bb552fb3bf19 100644 (file)
@@ -9,7 +9,7 @@ import subprocess
 import sys
 import time
 import traceback
-import urlparse
+from six.moves import urllib
 
 from benderjab import rpc
 
@@ -289,13 +289,13 @@ class CopierBot(rpc.XmlRpcBot):
         return reply
 
     def validate_url(self, url):
-        user_url = urlparse.urlsplit(url)
+        user_url = urllib.parse.urlsplit(url)
         user_scheme = user_url[0]
         user_netloc = user_url[1]
         user_path = user_url[2]
 
         for source in self.sources:
-            source_url = urlparse.urlsplit(source)
+            source_url = urllib.parse.urlsplit(source)
             source_scheme = source_url[0]
             source_netloc = source_url[1]
             source_path = source_url[2]
index 2aba7099915b95fe0f2097e4445c42dd9b805941..cc6f8ba389e235a8bc4aa03465372aea77112b86 100644 (file)
@@ -7,7 +7,7 @@ import os
 import types
 import re
 import sys
-from urlparse import urljoin, urlparse
+from six.moves.urllib.parse import urljoin, urlparse
 
 import RDF
 from htsworkflow.util.rdfhelp import libraryOntology as libNS
index 37d60edf9572ff8c51e6328b88ecc53467496ae8..8f9fccb7b897a677d02971fc45c791488b6237db 100644 (file)
@@ -5,7 +5,7 @@ import os
 from pprint import pformat,pprint
 import sys
 import types
-from urlparse import urljoin, urlparse
+from six.moves.urllib.parse import urljoin, urlparse
 
 from htsworkflow.pipelines.sequences import scan_for_sequences, \
      update_model_sequence_library
index 35b9a6c7d22a55cd52c833f4db8e599d2c34bea3..9c4b30e8c1a33da18d67634671e985ca3fe51702 100644 (file)
@@ -7,7 +7,7 @@ import re
 import string
 from six.moves import StringIO
 import types
-import urlparse
+from six.moves import urllib
 
 import RDF
 from htsworkflow.util.rdfhelp import \
@@ -232,7 +232,7 @@ def submission_uri_to_string(submission_uri):
 
 def get_view_namespace(submission_uri):
     submission_uri = submission_uri_to_string(submission_uri)
-    view_uri = urlparse.urljoin(submission_uri, 'view/')
+    view_uri = urllib.parse.urljoin(submission_uri, 'view/')
     viewNS = RDF.NS(view_uri)
     return viewNS
 
index 29f6eccd0a8a47a61eb85a99f74e323f52765272..334a03f24db2ec7945aa34d94aff41cdc60c8df0 100644 (file)
@@ -12,7 +12,7 @@ import jsonschema
 import os
 import requests
 import types
-from urlparse import urljoin, urlparse, urlunparse
+from six.moves.urllib.parse import urljoin, urlparse, urlunparse
 
 LOGGER = logging.getLogger(__name__)
 
index ff717366f2066b14e8f48213834d1ec27001f3e8..f668a2c44a67b9db10a84c6c275cdd8655f1ab46 100644 (file)
@@ -1,8 +1,7 @@
 """Utilities for extracting information from the ENCODE DCC
 """
 import logging
-import urlparse
-import urllib2
+from six.moves import urllib
 
 LOGGER = logging.getLogger(__name__)
 
@@ -21,7 +20,7 @@ def ddf_download_url(submission_id):
     'http://encodesubmit.ucsc.edu/pipeline/download_ddf/1234'
     """
     fragment = 'download_ddf/%s' % (submission_id,)
-    return urlparse.urljoin(UCSCEncodePipeline, fragment)
+    return urllib.parse.urljoin(UCSCEncodePipeline, fragment)
 
 
 def daf_download_url(submission_id):
@@ -31,7 +30,7 @@ def daf_download_url(submission_id):
     'http://encodesubmit.ucsc.edu/pipeline/download_daf/1234'
     """
     fragment = 'download_daf/%s' % (submission_id,)
-    return urlparse.urljoin(UCSCEncodePipeline, fragment)
+    return urllib.parse.urljoin(UCSCEncodePipeline, fragment)
 
 
 def submission_view_url(submission_id):
@@ -41,7 +40,7 @@ def submission_view_url(submission_id):
     'http://encodesubmit.ucsc.edu/pipeline/show/1234'
     """
     fragment = 'show/%s' % (submission_id,)
-    return urlparse.urljoin(UCSCEncodePipeline, fragment)
+    return urllib.parse.urljoin(UCSCEncodePipeline, fragment)
 
 
 def get_encodedcc_file_index(genome, composite):
@@ -58,10 +57,10 @@ def get_encodedcc_file_index(genome, composite):
         request_url = base_url + 'files.txt'
 
         try:
-            request = urllib2.urlopen(request_url)
+            request = urllib.request.urlopen(request_url)
             file_index = parse_ucsc_file_index(request, base_url)
             return file_index
-        except urllib2.HTTPError as e:
+        except urllib.error.HTTPError as e:
             err = e
             pass
 
index 8fa4d0b7962f2d97b363be4bd11b2c31f90b815c..89db4832dd4d4a516d13e2fa2dc012fded964b32 100644 (file)
@@ -5,8 +5,7 @@ from __future__ import print_function
 import collections
 from datetime import datetime
 from glob import glob
-from urlparse import urlparse, urlunparse
-from urllib2 import urlopen
+from six.moves import urllib
 import logging
 import os
 import sys
@@ -205,7 +204,7 @@ def simplify_uri(uri):
     if isinstance(uri, RDF.Uri):
         uri = str(uri)
 
-    parsed = urlparse(uri)
+    parsed = urllib.parse.urlparse(uri)
     if len(parsed.query) > 0:
         return parsed.query
     elif len(parsed.fragment) > 0:
@@ -262,13 +261,13 @@ def load_into_model(model, parser_name, path, ns=None):
         else:
             raise ValueError("url to load can't be a RDF literal")
 
-    url_parts = list(urlparse(path))
+    url_parts = list(urllib.parse.urlparse(path))
     if len(url_parts[0]) == 0 or url_parts[0] == 'file':
         url_parts[0] = 'file'
         url_parts[2] = os.path.abspath(url_parts[2])
     if parser_name is None or parser_name == 'guess':
         parser_name = guess_parser_by_extension(path)
-    url = urlunparse(url_parts)
+    url = urllib.parse.urlunparse(url_parts)
     logger.info("Opening {0} with parser {1}".format(url, parser_name))
 
     rdf_parser = RDF.Parser(name=parser_name)
index 674cd52fb918db49e7650d8ae6a8aa716e211722..dfd0ae11b437e1dda5a045852997aaf85cea26c8 100755 (executable)
@@ -4,8 +4,7 @@ from optparse import OptionParser
 import os
 import re
 import sys
-import urllib2
-import urlparse
+from six.moves import urllib
 
 from django.conf import settings
 
@@ -42,12 +41,12 @@ def update_db(root_url, flowcells, serial, debug=False):
     Creates link between flowcell and storage device over http
     """
     for fc in flowcells:
-        url = urlparse.urljoin(root_url, '%s/%s/' % (fc, serial))
+        url = urllib.parse.urljoin(root_url, '%s/%s/' % (fc, serial))
 
-        req = urllib2.Request(url)
+        req = urllib.request.Request(url)
         try:
-            response = urllib2.urlopen(req)
-        except urllib2.URLError, e:
+            response = urllib.request.urlopen(req)
+        except urllib.error.HTTPError as e:
             print 'ERROR - HTTP OUTPUT (Return Code: %s); use -v/--verbose for more details.' % (e.code)
             if debug:
                 print e.read()