Specify text vs binary mode for opening files.
author Diane Trout <diane@ghic.org>
Fri, 20 Mar 2015 20:47:36 +0000 (13:47 -0700)
committer Diane Trout <diane@ghic.org>
Fri, 20 Mar 2015 20:47:36 +0000 (13:47 -0700)
(And another smart_text cast slipped in)

experiments/views.py
htsworkflow/pipelines/eland.py
htsworkflow/submission/daf.py
htsworkflow/util/hashfile.py
htsworkflow/util/rdfhelp.py

index e5f675da42454868a24197a5f9109a6c4e1ede77..7d30bbeb19d337da8ce3b5f2af57422b640da49b 100644 (file)
@@ -176,7 +176,7 @@ def read_result_file(self, key):
         content_type = data_file.file_type.mimetype
 
     if os.path.exists(data_file.pathname):
-        return HttpResponse(open(data_file.pathname,'r'),
+        return HttpResponse(open(data_file.pathname,'rb'),
                             content_type=content_type)
 
     raise Http404
index b9a7ca3805a182b5bb827a1f459f267454ff041c..3298ae4bf87080314642e1b20dfe3f05519d18ae 100644 (file)
@@ -150,7 +150,7 @@ class ElandLane(ResultLane):
         self._reads = 0
 
         for pathname in self.pathnames:
-            stream = autoopen(pathname, 'r')
+            stream = autoopen(pathname, 'rt')
             if self.eland_type == ELAND_SINGLE:
                 result = self._update_eland_result(stream)
             elif self.eland_type == ELAND_MULTI or \
@@ -548,7 +548,7 @@ class SequenceLane(ResultLane):
         """
         Determine if we have a scarf or fastq sequence file
         """
-        f = open(pathname,'r')
+        f = open(pathname,'rt')
         l = f.readline()
         f.close()
 
@@ -575,8 +575,8 @@ class SequenceLane(ResultLane):
 
         LOGGER.info("summarizing results for %s" % (pathname))
         lines = 0
-        f = open(pathname)
-        for l in f.xreadlines():
+        f = open(pathname, 'rt')
+        for l in f:
             lines += 1
         f.close()
 
index 9c4b30e8c1a33da18d67634671e985ca3fe51702..b2d7419784e0ebf257b688b9867cecdfee3bed22 100644 (file)
@@ -270,7 +270,7 @@ class UCSCSubmission(object):
             self.daf = daf_file.read()
         else:
             # file
-            stream = open(daf_file, 'r')
+            stream = open(daf_file, 'rt')
             self.daf = stream.read()
             stream.close()
 
index 97628832d9408f007ab9e145b285d40e2b0dda6d..c6cc0f9c0ff68fea380a80d783858052781bad61 100644 (file)
@@ -14,7 +14,7 @@ def make_md5sum(filename):
     md5_cache = os.path.join(filename+".md5")
     if os.path.exists(md5_cache):
         logger.debug("Found md5sum in {0}".format(md5_cache))
-        stream = open(md5_cache,'r')
+        stream = open(md5_cache,'rt')
         lines = stream.readlines()
         md5sum = parse_md5sum_line(lines, filename)
     else:
@@ -35,13 +35,13 @@ def make_md5sum_unix(filename, md5_cache):
     md5sum = parse_md5sum_line(lines, filename)
     if md5sum is not None:
         logger.debug("Caching sum in {0}".format(md5_cache))
-        stream = open(md5_cache, "w")
-        stream.write(stdin)
+        stream = open(md5_cache, "wt")
+        stream.write(smart_text(stdin))
         stream.close()
     return md5sum
 
 def parse_md5sum_line(lines, filename):
-    md5sum, md5sum_filename = lines[0].split()
+    md5sum, md5sum_filename = smart_text(lines[0]).split()
     md5sum_filename = os.path.basename(md5sum_filename)
     filename = os.path.basename(filename)
     if md5sum_filename != filename:
index 33197ddb9631f4f07cfb6114bea4a3516a034ed0..ea1020c7b28fde8e4213b960afb7b90233d40300 100644 (file)
@@ -344,7 +344,7 @@ def add_default_schemas(model, schema_path=None):
         for path in schema_path:
             for pathname in glob(os.path.join(path, '*.turtle')):
                 url = 'file://' + os.path.splitext(pathname)[0]
-                stream = open(pathname, 'r')
+                stream = open(pathname, 'rt')
                 add_schema(model, stream, url)
                 stream.close()