import sys
import time
import types
-import urllib
-import urllib2
-import urlparse
from zipfile import ZipFile
import RDF
# redland rdf lib
import RDF
import sys
-import urllib
-import urlparse
+from six.moves import urllib
if not 'DJANGO_SETTINGS_MODULE' in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'htsworkflow.settings'
load_library_detail(model, library_urn)
def user_library_id_to_library_urn(library_id):
- split_url = urlparse.urlsplit(library_id)
+ split_url = urllib.parse.urlsplit(library_id)
if len(split_url.scheme) == 0:
return LIBRARY_NS[library_id]
else:
response, content = http.request(LOGIN_URL,
'POST',
headers=headers,
- body=urllib.urlencode(credentials))
+ body=urllib.parse.urlencode(credentials))
LOGGER.debug("Login to {0}, status {1}".format(LOGIN_URL,
response['status']))
import sys
import time
import types
-import urllib
-import urllib2
-import urlparse
from zipfile import ZipFile
import RDF
import sys
import time
import types
-import urllib
-import urllib2
-import urlparse
from zipfile import ZipFile
import RDF
import sys
import time
import traceback
-import urlparse
+from six.moves import urllib
from benderjab import rpc
return reply
def validate_url(self, url):
- user_url = urlparse.urlsplit(url)
+ user_url = urllib.parse.urlsplit(url)
user_scheme = user_url[0]
user_netloc = user_url[1]
user_path = user_url[2]
for source in self.sources:
- source_url = urlparse.urlsplit(source)
+ source_url = urllib.parse.urlsplit(source)
source_scheme = source_url[0]
source_netloc = source_url[1]
source_path = source_url[2]
import types
import re
import sys
-from urlparse import urljoin, urlparse
+from six.moves.urllib.parse import urljoin, urlparse
import RDF
from htsworkflow.util.rdfhelp import libraryOntology as libNS
from pprint import pformat,pprint
import sys
import types
-from urlparse import urljoin, urlparse
+from six.moves.urllib.parse import urljoin, urlparse
from htsworkflow.pipelines.sequences import scan_for_sequences, \
update_model_sequence_library
import string
from six.moves import StringIO
import types
-import urlparse
+from six.moves import urllib
import RDF
from htsworkflow.util.rdfhelp import \
def get_view_namespace(submission_uri):
submission_uri = submission_uri_to_string(submission_uri)
- view_uri = urlparse.urljoin(submission_uri, 'view/')
+ view_uri = urllib.parse.urljoin(submission_uri, 'view/')
viewNS = RDF.NS(view_uri)
return viewNS
import os
import requests
import types
-from urlparse import urljoin, urlparse, urlunparse
+from six.moves.urllib.parse import urljoin, urlparse, urlunparse
LOGGER = logging.getLogger(__name__)
"""Utilities for extracting information from the ENCODE DCC
"""
import logging
-import urlparse
-import urllib2
+from six.moves import urllib
LOGGER = logging.getLogger(__name__)
'http://encodesubmit.ucsc.edu/pipeline/download_ddf/1234'
"""
fragment = 'download_ddf/%s' % (submission_id,)
- return urlparse.urljoin(UCSCEncodePipeline, fragment)
+ return urllib.parse.urljoin(UCSCEncodePipeline, fragment)
def daf_download_url(submission_id):
'http://encodesubmit.ucsc.edu/pipeline/download_daf/1234'
"""
fragment = 'download_daf/%s' % (submission_id,)
- return urlparse.urljoin(UCSCEncodePipeline, fragment)
+ return urllib.parse.urljoin(UCSCEncodePipeline, fragment)
def submission_view_url(submission_id):
'http://encodesubmit.ucsc.edu/pipeline/show/1234'
"""
fragment = 'show/%s' % (submission_id,)
- return urlparse.urljoin(UCSCEncodePipeline, fragment)
+ return urllib.parse.urljoin(UCSCEncodePipeline, fragment)
def get_encodedcc_file_index(genome, composite):
request_url = base_url + 'files.txt'
try:
- request = urllib2.urlopen(request_url)
+ request = urllib.request.urlopen(request_url)
file_index = parse_ucsc_file_index(request, base_url)
return file_index
- except urllib2.HTTPError as e:
+ except urllib.error.HTTPError as e:
err = e
pass
import collections
from datetime import datetime
from glob import glob
-from urlparse import urlparse, urlunparse
-from urllib2 import urlopen
+from six.moves import urllib
import logging
import os
import sys
if isinstance(uri, RDF.Uri):
uri = str(uri)
- parsed = urlparse(uri)
+ parsed = urllib.parse.urlparse(uri)
if len(parsed.query) > 0:
return parsed.query
elif len(parsed.fragment) > 0:
else:
raise ValueError("url to load can't be a RDF literal")
- url_parts = list(urlparse(path))
+ url_parts = list(urllib.parse.urlparse(path))
if len(url_parts[0]) == 0 or url_parts[0] == 'file':
url_parts[0] = 'file'
url_parts[2] = os.path.abspath(url_parts[2])
if parser_name is None or parser_name == 'guess':
parser_name = guess_parser_by_extension(path)
- url = urlunparse(url_parts)
+ url = urllib.parse.urlunparse(url_parts)
logger.info("Opening {0} with parser {1}".format(url, parser_name))
rdf_parser = RDF.Parser(name=parser_name)
import os
import re
import sys
-import urllib2
-import urlparse
+from six.moves import urllib
from django.conf import settings
Creates link between flowcell and storage device over http
"""
for fc in flowcells:
- url = urlparse.urljoin(root_url, '%s/%s/' % (fc, serial))
+ url = urllib.parse.urljoin(root_url, '%s/%s/' % (fc, serial))
- req = urllib2.Request(url)
+ req = urllib.request.Request(url)
try:
- response = urllib2.urlopen(req)
+ response = urllib.request.urlopen(req)
- except urllib2.URLError, e:
+ except urllib.error.URLError as e:
print 'ERROR - HTTP OUTPUT (Return Code: %s); use -v/--verbose for more details.' % (e.code)
if debug:
print e.read()