More f-strings using flynt 0.69

A large chunk of Bio/, keeping the lines changed to just over 200 to help with reviewing.
Peter Cock
2021-09-16 23:00:16 +01:00
parent cb4f6edf59
commit ddc7f2aeed
40 changed files with 223 additions and 250 deletions
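For context, the change is mechanical: flynt rewrites printf-style % formatting into equivalent f-strings. The following is a minimal hand-written sketch (not part of the commit) of the substitution patterns that appear in the hunks below; the variable names are illustrative only, not taken from the Biopython sources.

    # Plain substitution: "%s" becomes an {expression} inside an f-string.
    name = "gapopen"
    assert "Parameter %s is not set." % name == f"Parameter {name} is not set."

    # repr substitution: "%r" becomes {expression!r}.
    assert "Expected %r" % name == f"Expected {name!r}"

    # Format specs carry over after a colon: "%f" -> {value:f}, "%0.2f" -> {value:0.2f}.
    value = 10.5
    assert "\t%f" % value == f"\t{value:f}"
    assert "%0.2f" % value == f"{value:0.2f}"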

View File

@ -247,11 +247,11 @@ class AbstractCommandline:
for p in parameters:
if not p.names:
if not isinstance(p, _StaticArgument):
raise TypeError("Expected %r to be of type _StaticArgument" % p)
raise TypeError(f"Expected {p!r} to be of type _StaticArgument")
continue
for name in p.names:
if name in aliases:
raise ValueError("Parameter alias %s multiply defined" % name)
raise ValueError(f"Parameter alias {name} multiply defined")
aliases.add(name)
name = p.names[-1]
if _re_prop_name.match(name) is None:
@ -310,7 +310,7 @@ class AbstractCommandline:
for p in self.parameters:
# Check for missing required parameters:
if p.is_required and not (p.is_set):
raise ValueError("Parameter %s is not set." % p.names[-1])
raise ValueError(f"Parameter {p.names[-1]} is not set.")
# Also repeat the parameter validation here, just in case?
def __str__(self):
@ -329,7 +329,7 @@ class AbstractCommandline:
'water -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5'
"""
self._validate()
commandline = "%s " % _escape_filename(self.program_name)
commandline = f"{_escape_filename(self.program_name)} "
for parameter in self.parameters:
if parameter.is_set:
# This will include a trailing space:
@ -355,7 +355,7 @@ class AbstractCommandline:
for parameter in self.parameters:
if parameter.is_set:
if isinstance(parameter, _Switch):
answer += ", %s=True" % parameter.names[-1]
answer += f", {parameter.names[-1]}=True"
else:
answer += f", {parameter.names[-1]}={parameter.value!r}"
answer += ")"
@ -369,7 +369,7 @@ class AbstractCommandline:
return parameter.is_set
else:
return parameter.value
raise ValueError("Option name %s was not found." % name)
raise ValueError(f"Option name {name} was not found.")
def _clear_parameter(self, name):
"""Reset or clear a commandline option value (PRIVATE)."""
@ -380,7 +380,7 @@ class AbstractCommandline:
parameter.is_set = False
cleared_option = True
if not cleared_option:
raise ValueError("Option name %s was not found." % name)
raise ValueError(f"Option name {name} was not found.")
def set_parameter(self, name, value=None):
"""Set a commandline option for a program (OBSOLETE).
@ -412,7 +412,7 @@ class AbstractCommandline:
parameter.is_set = True
set_option = True
if not set_option:
raise ValueError("Option name %s was not found." % name)
raise ValueError(f"Option name {name} was not found.")
def _check_value(self, value, name, check_function):
"""Check whether the given value is valid (PRIVATE).
@ -428,7 +428,7 @@ class AbstractCommandline:
is_good = check_function(value) # May raise an exception
if is_good not in [0, 1, True, False]:
raise ValueError(
"Result of check_function: %r is of an unexpected value" % is_good
f"Result of check_function: {is_good!r} is of an unexpected value"
)
if not is_good:
raise ValueError(
@ -655,7 +655,7 @@ class _Option(_AbstractParameter):
# or " -name " or " -name value ". This choice is now
# now made explicitly when setting up the option.
if self.value is None:
return "%s " % self.names[0]
return f"{self.names[0]} "
if self.is_filename:
v = _escape_filename(self.value)
else:
@ -703,7 +703,7 @@ class _Switch(_AbstractParameter):
"""
assert not hasattr(self, "value")
if self.is_set:
return "%s " % self.names[0]
return f"{self.names[0]} "
else:
return ""
@ -743,9 +743,9 @@ class _Argument(_AbstractParameter):
if self.value is None:
return " "
elif self.is_filename:
return "%s " % _escape_filename(self.value)
return f"{_escape_filename(self.value)} "
else:
return "%s " % self.value
return f"{self.value} "
class _ArgumentList(_Argument):
@ -782,7 +782,7 @@ class _StaticArgument(_AbstractParameter):
self.value = value
def __str__(self):
return "%s " % self.value
return f"{self.value} "
def _escape_filename(filename):
@ -823,7 +823,7 @@ def _escape_filename(filename):
# Its already quoted
return filename
else:
return '"%s"' % filename
return f'"{filename}"'
def _test():

View File

@ -125,8 +125,7 @@ def qblast(
programs = ["blastn", "blastp", "blastx", "tblastn", "tblastx"]
if program not in programs:
raise ValueError(
"Program specified is %s. Expected one of %s"
% (program, ", ".join(programs))
f"Program specified is {program}. Expected one of {', '.join(programs)}"
)
# SHORT_QUERY_ADJUST throws an error when using blastn (wrong parameter
@ -308,20 +307,20 @@ def _parse_qblast_ref_page(handle):
msg = s[i + len('<div class="error msInf">') :].strip()
msg = msg.split("</div>", 1)[0].split("\n", 1)[0].strip()
if msg:
raise ValueError("Error message from NCBI: %s" % msg)
raise ValueError(f"Error message from NCBI: {msg}")
# In spring 2010 the markup was like this:
i = s.find('<p class="error">')
if i != -1:
msg = s[i + len('<p class="error">') :].strip()
msg = msg.split("</p>", 1)[0].split("\n", 1)[0].strip()
if msg:
raise ValueError("Error message from NCBI: %s" % msg)
raise ValueError(f"Error message from NCBI: {msg}")
# Generic search based on the way the error messages start:
i = s.find("Message ID#")
if i != -1:
# Break the message at the first HTML tag
msg = s[i:].split("<", 1)[0].split("\n", 1)[0].strip()
raise ValueError("Error message from NCBI: %s" % msg)
raise ValueError(f"Error message from NCBI: {msg}")
# We didn't recognise the error layout :(
# print(s)
raise ValueError(
@ -332,17 +331,17 @@ def _parse_qblast_ref_page(handle):
elif not rid:
# Can this happen?
raise ValueError(
"No RID found in the 'please wait' page. (although RTOE = %r)" % rtoe
f"No RID found in the 'please wait' page. (although RTOE = {rtoe!r})"
)
elif not rtoe:
# Can this happen?
raise ValueError(
"No RTOE found in the 'please wait' page. (although RID = %r)" % rid
f"No RTOE found in the 'please wait' page. (although RID = {rid!r})"
)
try:
return rid, int(rtoe)
except ValueError:
raise ValueError(
"A non-integer RTOE found in the 'please wait' page, %r" % rtoe
f"A non-integer RTOE found in the 'please wait' page, {rtoe!r}"
) from None

View File

@ -98,7 +98,7 @@ class BlastTableReader:
def _consume_header(self, inline):
for keyword in self.reader_keywords:
if keyword in inline:
return self._Parse("_parse_%s" % self.reader_keywords[keyword], inline)
return self._Parse(f"_parse_{self.reader_keywords[keyword]}", inline)
def _parse_version(self, inline):
program, version, date = inline.split()[1:]

View File

@ -154,7 +154,7 @@ class Alignment:
def __str__(self):
"""Return the BLAST alignment as a formatted string."""
lines = self.title.split("\n")
lines.append("Length = %s\n" % self.length)
lines.append(f"Length = {self.length}\n")
return "\n ".join(lines)
@ -245,7 +245,7 @@ class HSP:
lines.append(
"Query:%8s %s %s" % (self.query_start, self.query, self.query_end)
)
lines.append(" %s" % self.match)
lines.append(f" {self.match}")
lines.append(
"Sbjct:%8s %s %s" % (self.sbjct_start, self.sbjct, self.sbjct_end)
)

View File

@ -1163,7 +1163,7 @@ class Record:
outputfile.write("\tNAME\tGWEIGHT")
# Now add headers for data columns.
for j in expindex:
outputfile.write("\t%s" % self.expid[j])
outputfile.write(f"\t{self.expid[j]}")
outputfile.write("\n")
if aid:
outputfile.write("AID")
@ -1178,7 +1178,7 @@ class Record:
outputfile.write("\t")
outputfile.write("\t\t")
for j in expindex:
outputfile.write("\t%f" % eweight[j])
outputfile.write(f"\t{eweight[j]:f}")
outputfile.write("\n")
for i in geneindex:
if gid:

View File

@ -154,7 +154,7 @@ def __read_names(record, line):
# Ali1: 60456.blo.gz.aln Ali2: allscop//14984.blo.gz.aln
# ------query----- -------hit-------------
if "Ali1:" not in line:
raise ValueError("Line does not contain 'Ali1:':\n%s" % line)
raise ValueError(f"Line does not contain 'Ali1:':\n{line}")
m = __regex["names"].search(line)
record.query = m.group(1)
record.hit = m.group(2)
@ -162,14 +162,14 @@ def __read_names(record, line):
def __read_threshold(record, line):
if not line.startswith("Threshold"):
raise ValueError("Line does not start with 'Threshold':\n%s" % line)
raise ValueError(f"Line does not start with 'Threshold':\n{line}")
m = __regex["threshold"].search(line)
record.gap_threshold = float(m.group(1))
def __read_lengths(record, line):
if not line.startswith("length1="):
raise ValueError("Line does not start with 'length1=':\n%s" % line)
raise ValueError(f"Line does not start with 'length1=':\n{line}")
m = __regex["lengths"].search(line)
record.query_length = int(m.group(1))
record.query_filtered_length = float(m.group(2))
@ -179,7 +179,7 @@ def __read_lengths(record, line):
def __read_profilewidth(record, line):
if "Nseqs1" not in line:
raise ValueError("Line does not contain 'Nseqs1':\n%s" % line)
raise ValueError(f"Line does not contain 'Nseqs1':\n{line}")
m = __regex["profilewidth"].search(line)
record.query_nseqs = int(m.group(1))
record.query_neffseqs = float(m.group(2))
@ -189,7 +189,7 @@ def __read_profilewidth(record, line):
def __read_scores(record, line):
if not line.startswith("Smith-Waterman"):
raise ValueError("Line does not start with 'Smith-Waterman':\n%s" % line)
raise ValueError(f"Line does not start with 'Smith-Waterman':\n{line}")
m = __regex["scores"].search(line)
if m:
record.sw_score = int(m.group(1))

View File

@ -120,14 +120,14 @@ class CodonTable:
# Build the table...
answer += "\n\n"
answer += " |" + "|".join(" %s " % c2 for c2 in letters) + "|"
answer += " |" + "|".join(f" {c2} " for c2 in letters) + "|"
answer += "\n--+" + "+".join("---------" for c2 in letters) + "+--"
for c1 in letters:
for c3 in letters:
line = c1 + " |"
for c2 in letters:
codon = c1 + c2 + c3
line += " %s" % codon
line += f" {codon}"
if codon in self.stop_codons:
line += " Stop|"
else:
@ -138,9 +138,9 @@ class CodonTable:
except TranslationError:
amino = "?"
if codon in self.start_codons:
line += " %s(s)|" % amino
line += f" {amino}(s)|"
else:
line += " %s |" % amino
line += f" {amino} |"
line += " " + c3
answer += "\n" + line
answer += "\n--+" + "+".join("---------" for c2 in letters) + "+--"
@ -179,11 +179,7 @@ class NCBICodonTable(CodonTable):
def __repr__(self):
"""Represent the NCBI codon table class as a string for debugging."""
return "%s(id=%r, names=%r, ...)" % (
self.__class__.__name__,
self.id,
self.names,
)
return f"{self.__class__.__name__}(id={self.id!r}, names={self.names!r}, ...)"
class NCBICodonTableDNA(NCBICodonTable):
@ -260,7 +256,7 @@ def list_possible_proteins(codon, forward_table, ambiguous_nucleotide_values):
if stops:
if possible:
raise TranslationError(
"ambiguous codon %r codes for both proteins and stop codons" % codon
f"ambiguous codon {codon!r} codes for both proteins and stop codons"
)
# This is a true stop codon - tell the caller about it
raise KeyError(codon)

View File

@ -85,7 +85,7 @@ class NoneElement:
attributes = self.attributes
except AttributeError:
return "NoneElement"
return "NoneElement(attributes=%r)" % attributes
return f"NoneElement(attributes={attributes!r})"
class IntegerElement(int):
@ -158,7 +158,7 @@ class ListElement(list):
"""Append an element to the list, checking tags."""
key = value.key
if self.allowed_tags is not None and key not in self.allowed_tags:
raise ValueError("Unexpected item '%s' in list" % key)
raise ValueError(f"Unexpected item '{key}' in list")
self.append(value)
@ -192,7 +192,7 @@ class DictionaryElement(dict):
key = value.key
tag = value.tag
if self.allowed_tags is not None and tag not in self.allowed_tags:
raise ValueError("Unexpected item '%s' in dictionary" % key)
raise ValueError(f"Unexpected item '{key}' in dictionary")
if self.repeated_tags and key in self.repeated_tags:
self[key].append(value)
else:
@ -481,7 +481,7 @@ class DataHandler(metaclass=DataHandlerMeta):
def schemaHandler(self, name, attrs):
"""Process the XML schema (before processing the element)."""
key = "%s noNamespaceSchemaLocation" % self.schema_namespace
key = f"{self.schema_namespace} noNamespaceSchemaLocation"
schema = attrs[key]
handle = self.open_xsd_file(os.path.basename(schema))
# if there is no local xsd file grab the url and parse the file
@ -567,7 +567,7 @@ class DataHandler(metaclass=DataHandlerMeta):
self.parser.EndElementHandler = self.endStringElementHandler
self.parser.CharacterDataHandler = self.characterDataHandler
else:
raise ValueError("Unknown item type %s" % name)
raise ValueError(f"Unknown item type {name}")
elif tag in self.errors:
self.parser.EndElementHandler = self.endErrorElementHandler
self.parser.CharacterDataHandler = self.characterDataHandler
@ -635,7 +635,7 @@ class DataHandler(metaclass=DataHandlerMeta):
key = name
# self.allowed_tags is ignored for now. Anyway we know what to do
# with this tag.
tag = "<%s" % name
tag = f"<{name}"
for key, value in attrs.items():
tag += f' {key}="{value}"'
tag += ">"
@ -681,7 +681,7 @@ class DataHandler(metaclass=DataHandlerMeta):
uri, name = name.split()
except ValueError:
pass
tag = "</%s>" % name
tag = f"</{name}>"
self.data.append(tag)
def endSkipElementHandler(self, name):
@ -976,7 +976,7 @@ class DataHandler(metaclass=DataHandlerMeta):
# urls always have a forward slash, don't use os.path.join
url = source.rstrip("/") + "/" + systemId
else:
raise ValueError("Unexpected URL scheme %r" % urlinfo.scheme)
raise ValueError(f"Unexpected URL scheme {urlinfo.scheme!r}")
self.dtd_urls.append(url)
# First, try to load the local version of the DTD file
location, filename = os.path.split(systemId)

View File

@ -89,11 +89,11 @@ class Record(dict):
"""Return the canonical string representation of the Record object."""
if self["ID"]:
if self["DE"]:
return "%s (%s, %s)" % (self.__class__.__name__, self["ID"], self["DE"])
return f"{self.__class__.__name__} ({self['ID']}, {self['DE']})"
else:
return "%s (%s)" % (self.__class__.__name__, self["ID"])
return f"{self.__class__.__name__} ({self['ID']})"
else:
return "%s ( )" % (self.__class__.__name__)
return f"{self.__class__.__name__} ( )"
def __str__(self):
"""Return a readable string representation of the Record object."""

View File

@ -85,7 +85,7 @@ class Reference:
def __read_prosite_reference_line(record, line):
line = line.rstrip()
if line[-1] != "}":
raise ValueError("I don't understand the Prosite reference on line\n%s" % line)
raise ValueError(f"I don't understand the Prosite reference on line\n{line}")
acc, name = line[1:-1].split("; ")
record.prosite_refs.append((acc, name))
@ -118,7 +118,7 @@ def __read_reference_line(record, line):
else:
reference.citation += line[5:]
return True
raise Exception("I don't understand the reference line\n%s" % line)
raise Exception(f"I don't understand the reference line\n{line}")
def __read_copyright_line(record, line):
@ -141,7 +141,7 @@ def __read(handle):
if not line.startswith("{PDOC"):
raise ValueError("Line does not start with '{PDOC':\n%s" % line)
if line[-1] != "}":
raise ValueError("I don't understand accession line\n%s" % line)
raise ValueError(f"I don't understand accession line\n{line}")
record.accession = line[1:-1]
# Read the Prosite references
for line in handle:

View File

@ -163,7 +163,7 @@ def __read(handle):
record = Record()
cols = value.split("; ")
if len(cols) != 2:
raise ValueError("I don't understand identification line\n%s" % line)
raise ValueError(f"I don't understand identification line\n{line}")
record.name = cols[0]
record.type = cols[1].rstrip(".") # don't want '.'
elif keyword == "AC":
@ -177,17 +177,17 @@ def __read(handle):
# Remove last word
record.created = dates[0].rsplit(" ", 1)[0]
else:
raise ValueError("I don't understand date line\n%s" % line)
raise ValueError(f"I don't understand date line\n{line}")
if dates[1].endswith((" (DATA UPDATE)", " DATA UPDATE")):
# Remove last two words
record.data_update = dates[1].rsplit(" ", 2)[0]
else:
raise ValueError("I don't understand date line\n%s" % line)
raise ValueError(f"I don't understand date line\n{line}")
if dates[2].endswith((" (INFO UPDATE)", " INFO UPDATE")):
# Remove last two words
record.info_update = dates[2].rsplit(" ", 2)[0]
else:
raise ValueError("I don't understand date line\n%s" % line)
raise ValueError(f"I don't understand date line\n{line}")
elif keyword == "DE":
record.description = value
elif keyword == "PA":
@ -284,7 +284,7 @@ def __read(handle):
elif type == "?":
record.dr_unknown.append((acc, name))
else:
raise ValueError("I don't understand type flag %s" % type)
raise ValueError(f"I don't understand type flag {type}")
elif keyword == "3D":
cols = value.split()
for id in cols:
@ -300,7 +300,7 @@ def __read(handle):
continue
break
else:
raise ValueError("Unknown keyword %s found" % keyword)
raise ValueError(f"Unknown keyword {keyword} found")
else:
return
if not record:

View File

@ -85,9 +85,9 @@ def get_prosite_raw(id, cgi=None):
ValueError: Failed to find entry 'DOES_NOT_EXIST' on ExPASy
"""
handle = _open("https://prosite.expasy.org/%s.txt" % id)
handle = _open(f"https://prosite.expasy.org/{id}.txt")
if handle.url == "https://www.expasy.org/":
raise ValueError("Failed to find entry '%s' on ExPASy" % id) from None
raise ValueError(f"Failed to find entry '{id}' on ExPASy") from None
return handle
@ -114,10 +114,10 @@ def get_sprot_raw(id):
"""
try:
handle = _open("http://www.uniprot.org/uniprot/%s.txt" % id)
handle = _open(f"http://www.uniprot.org/uniprot/{id}.txt")
except HTTPError as exception:
if exception.code == 404:
raise ValueError("Failed to find SwissProt entry '%s'" % id) from None
raise ValueError(f"Failed to find SwissProt entry '{id}'") from None
else:
raise
return handle

View File

@ -122,11 +122,11 @@ class Record(dict):
"""Return the canonical string representation of the Record object."""
if self["ID"]:
if self["AC"]:
return "%s (%s, %s)" % (self.__class__.__name__, self["ID"], self["AC"])
return f"{self.__class__.__name__} ({self['ID']}, {self['AC']})"
else:
return "%s (%s)" % (self.__class__.__name__, self["ID"])
return f"{self.__class__.__name__} ({self['ID']})"
else:
return "%s ( )" % (self.__class__.__name__)
return f"{self.__class__.__name__} ( )"
def __str__(self):
"""Return a readable string representation of the Record object."""

View File

@ -39,8 +39,8 @@ class Record:
def __str__(self):
"""Return the GEO record as a string."""
output = ""
output += "GEO Type: %s\n" % self.entity_type
output += "GEO Id: %s\n" % self.entity_id
output += f"GEO Type: {self.entity_type}\n"
output += f"GEO Id: {self.entity_id}\n"
att_keys = sorted(self.entity_attributes)
for key in att_keys:
contents = self.entity_attributes[key]
@ -68,16 +68,16 @@ class Record:
# so only show the first 20 lines of data
MAX_ROWS = 20 + 1 # include header in count
for row in self.table_rows[0:MAX_ROWS]:
output += "%s: " % self.table_rows.index(row)
output += f"{self.table_rows.index(row)}: "
for col in row:
output += "%s\t" % col
output += f"{col}\t"
output += "\n"
if len(self.table_rows) > MAX_ROWS:
output += "...\n"
row = self.table_rows[-1]
output += "%s: " % self.table_rows.index(row)
output += f"{self.table_rows.index(row)}: "
for col in row:
output += "%s\t" % col
output += f"{col}\t"
output += "\n"
return output

View File

@ -190,7 +190,7 @@ class MarkovModelBuilder:
for state in initial_prob:
if state not in self._state_alphabet:
raise ValueError(
"State %s was not found in the sequence alphabet" % state
f"State {state} was not found in the sequence alphabet"
)
# distribute the residual probability, if any
@ -343,7 +343,7 @@ class MarkovModelBuilder:
for state in [from_state, to_state]:
if state not in self._state_alphabet:
raise ValueError(
"State %s was not found in the sequence alphabet" % state
f"State {state} was not found in the sequence alphabet"
)
# ensure that the states are not already set
@ -377,8 +377,7 @@ class MarkovModelBuilder:
del self.transition_pseudo[(from_state, to_state)]
except KeyError:
raise KeyError(
"Transition from %s to %s is already disallowed."
% (from_state, to_state)
f"Transition from {from_state} to {to_state} is already disallowed."
)
def set_transition_score(self, from_state, to_state, probability):

View File

@ -47,10 +47,7 @@ def pretty_print_prediction(
else:
extension = len(emissions) - cur_position
print(
"%s%s"
% (emission_title, emissions[cur_position : cur_position + seq_length])
)
print(f"{emission_title}{emissions[cur_position:cur_position + seq_length]}")
print(f"{real_title}{real_state[cur_position : cur_position + seq_length]}")
print(
"%s%s\n"

View File

@ -177,7 +177,7 @@ class KGMLParser:
from Bio import BiopythonParserWarning
warnings.warn(
"Warning: tag %s not implemented in parser" % element.tag,
f"Warning: tag {element.tag} not implemented in parser",
BiopythonParserWarning,
)
return self.pathway

View File

@ -82,7 +82,7 @@ class Pathway:
"<!DOCTYPE pathway SYSTEM "
'"http://www.genome.jp/kegg/xml/'
'KGML_v0.7.2_.dtd">',
"<!-- Created by KGML_Pathway.py %s -->" % time.asctime(),
f"<!-- Created by KGML_Pathway.py {time.asctime()} -->",
]
)
rough_xml = header + ET.tostring(self.element, "utf-8").decode()
@ -115,8 +115,7 @@ class Pathway:
# We insist that the node ID is an integer and corresponds to an entry
if not isinstance(reaction.id, int):
raise ValueError(
"Node ID must be an integer, got %s (%s)"
% (type(reaction.id), reaction.id)
f"Node ID must be an integer, got {type(reaction.id)} ({reaction.id})"
)
if reaction.id not in self.entries:
raise ValueError("Reaction ID %d has no corresponding entry" % reaction.id)
@ -127,8 +126,7 @@ class Pathway:
"""Remove a Reaction element from the pathway."""
if not isinstance(reaction.id, int):
raise TypeError(
"Node ID must be an integer, got %s (%s)"
% (type(reaction.id), reaction.id)
f"Node ID must be an integer, got {type(reaction.id)} ({reaction.id})"
)
# We need to remove the reaction from any other elements that may
# contain it, which means removing those elements
@ -147,10 +145,10 @@ class Pathway:
def __str__(self):
"""Return a readable summary description string."""
outstr = [
"Pathway: %s" % self.title,
"KEGG ID: %s" % self.name,
"Image file: %s" % self.image,
"Organism: %s" % self.org,
f"Pathway: {self.title}",
f"KEGG ID: {self.name}",
f"Image file: {self.image}",
f"Organism: {self.org}",
"Entries: %d" % len(self.entries),
"Entry types:",
]
@ -166,7 +164,7 @@ class Pathway:
def _setname(self, value):
if not value.startswith("path:"):
raise ValueError("Pathway name should begin with 'path:', got %s" % value)
raise ValueError(f"Pathway name should begin with 'path:', got {value}")
self._name = value
def _delname(self):
@ -298,10 +296,10 @@ class Entry:
"""Return readable descriptive string."""
outstr = [
"Entry node ID: %d" % self.id,
"Names: %s" % self.name,
"Type: %s" % self.type,
"Components: %s" % self.components,
"Reactions: %s" % self.reaction,
f"Names: {self.name}",
f"Type: {self.type}",
f"Components: {self.components}",
f"Reactions: {self.reaction}",
"Graphics elements: %d %s" % (len(self.graphics), self.graphics),
]
return "\n".join(outstr) + "\n"
@ -315,7 +313,7 @@ class Entry:
if self._pathway is not None:
if element.id not in self._pathway.entries:
raise ValueError(
"Component %s is not an entry in the pathway" % element.id
f"Component {element.id} is not an entry in the pathway"
)
self.components.add(element)
@ -679,11 +677,11 @@ class Reaction:
def __str__(self):
"""Return an informative human-readable string."""
outstr = [
"Reaction node ID: %s" % self.id,
"Reaction KEGG IDs: %s" % self.name,
"Type: %s" % self.type,
"Substrates: %s" % ",".join([s.name for s in self.substrates]),
"Products: %s" % ",".join([s.name for s in self.products]),
f"Reaction node ID: {self.id}",
f"Reaction KEGG IDs: {self.name}",
f"Type: {self.type}",
f"Substrates: {','.join([s.name for s in self.substrates])}",
f"Products: {','.join([s.name for s in self.products])}",
]
return "\n".join(outstr) + "\n"
@ -812,7 +810,7 @@ class Relation:
str(self.entry2),
]
for s in self.subtypes:
outstr.extend(["Subtype: %s" % s[0], str(s[1])])
outstr.extend([f"Subtype: {s[0]}", str(s[1])])
return "\n".join(outstr)
# Properties entry1 and entry2

View File

@ -234,7 +234,7 @@ class StepMatrix:
def smprint(self, name="your_name_here"):
"""Print a stepmatrix."""
matrix = "usertype %s stepmatrix=%d\n" % (name, len(self.symbols))
matrix += " %s\n" % " ".join(self.symbols)
matrix += f" {' '.join(self.symbols)}\n"
for x in self.symbols:
matrix += "[%s]".ljust(8) % x
for y in self.symbols:
@ -322,7 +322,7 @@ def _unique_label(previous_labels, label):
copy_num = 1
if label_split[-1] != "copy":
copy_num = int(label_split[-1][4:]) + 1
new_label = "%s.copy%s" % (".".join(label_split[:-1]), copy_num)
new_label = f"{'.'.join(label_split[:-1])}.copy{copy_num}"
label = new_label
else:
label += ".copy"
@ -605,9 +605,7 @@ class Commandline:
for token in token_indices:
self.options[options[token].lower()] = None
except ValueError:
raise NexusError(
"Incorrect formatting in line: %s" % line
) from None
raise NexusError(f"Incorrect formatting in line: {line}") from None
class Block:
@ -693,7 +691,7 @@ class Nexus:
self.filename = "input_string"
else:
print(input.strip()[:50])
raise NexusError("Unrecognized input: %s ..." % input[:100]) from None
raise NexusError(f"Unrecognized input: {input[:100]} ...") from None
file_contents = file_contents.strip()
if file_contents.startswith("#NEXUS"):
file_contents = file_contents[6:]
@ -728,7 +726,7 @@ class Nexus:
inblock = True
title = cl.split()[1].lower()
else:
raise NexusError("Illegal block nesting in block %s" % title)
raise NexusError(f"Illegal block nesting in block {title}")
elif cl.lower().startswith("end"):
if inblock:
inblock = False
@ -756,7 +754,7 @@ class Nexus:
try:
getattr(self, "_" + line.command)(line.options)
except AttributeError:
raise NexusError("Unknown command: %s " % line.command) from None
raise NexusError(f"Unknown command: {line.command} ") from None
def _title(self, options):
pass
@ -930,7 +928,7 @@ class Nexus:
if c is None:
break
elif c != ",":
raise NexusError("Missing ',' in line %s." % options)
raise NexusError(f"Missing ',' in line {options}.")
def _charstatelabels(self, options):
self.charlabels = {}
@ -963,13 +961,13 @@ class Nexus:
elif c != ",":
# Check if states are defined, otherwise report error
if c != "/":
raise NexusError("Missing ',' in line %s." % options)
raise NexusError(f"Missing ',' in line {options}.")
# Get the first state
state = quotestrip(opts.next_word())
if state is None:
raise NexusError("Missing character state in line %s." % options)
raise NexusError(f"Missing character state in line {options}.")
while True:
# Make sure current state does not exceed number of
@ -1139,11 +1137,11 @@ class Nexus:
if c is None:
break
elif c != ",":
raise NexusError("Missing ',' in line %s." % options)
raise NexusError(f"Missing ',' in line {options}.")
except NexusError:
raise
except Exception: # TODO: ValueError?
raise NexusError("Format error in line %s." % options) from None
raise NexusError(f"Format error in line {options}.") from None
def _utree(self, options):
"""Use 'utree' to denote an unrooted tree (ex: clustalx) (PRIVATE)."""
@ -1156,7 +1154,7 @@ class Nexus:
dummy = opts.next_nonwhitespace()
name = opts.next_word()
if opts.next_nonwhitespace() != "=":
raise NexusError("Syntax error in tree description: %s" % options[:50])
raise NexusError(f"Syntax error in tree description: {options[:50]}")
rooted = False
weight = 1.0
while opts.peek_nonwhitespace() == "[":
@ -1216,7 +1214,7 @@ class Nexus:
opts = CharBuffer(options)
name = self._name_n_vector(opts)
if not name:
raise NexusError("Formatting error in taxpartition: %s " % options)
raise NexusError(f"Formatting error in taxpartition: {options} ")
# now collect thesubbpartitions and parse them
# subpartitons separated by commas - which unfortunately could be part of a quoted identifier...
# this is rather unelegant, but we have to avoid double-parsing and potential change of special nexus-words
@ -1248,7 +1246,7 @@ class Nexus:
# mcclade calls it CodonPositions, but you never know...
codonname = [n for n in self.charpartitions if n not in prev_partitions]
if codonname == [] or len(codonname) > 1:
raise NexusError("Formatting Error in codonposset: %s " % options)
raise NexusError(f"Formatting Error in codonposset: {options} ")
else:
self.codonposset = codonname[0]
@ -1262,7 +1260,7 @@ class Nexus:
opts = CharBuffer(options)
name = self._name_n_vector(opts)
if not name:
raise NexusError("Formatting error in charpartition: %s " % options)
raise NexusError(f"Formatting error in charpartition: {options} ")
# now collect the subpartitions and parse them
# subpartitions separated by commas - which unfortunately could be part
# of a quoted identifier...
@ -1293,7 +1291,7 @@ class Nexus:
name = self._name_n_vector(opts, separator=separator)
indices = self._parse_list(opts, set_type=set_type)
if indices is None:
raise NexusError("Formatting error in line: %s " % options)
raise NexusError(f"Formatting error in line: {options} ")
return name, indices
def _name_n_vector(self, opts, separator="="):
@ -1304,18 +1302,18 @@ class Nexus:
if name == "*":
name = opts.next_word()
if not name:
raise NexusError("Formatting error in line: %s " % rest)
raise NexusError(f"Formatting error in line: {rest} ")
name = quotestrip(name)
if opts.peek_nonwhitespace == "(":
open = opts.next_nonwhitespace()
qualifier = open.next_word()
close = opts.next_nonwhitespace()
if qualifier.lower() == "vector":
raise NexusError("Unsupported VECTOR format in line %s" % (opts))
raise NexusError(f"Unsupported VECTOR format in line {opts}")
elif qualifier.lower() != "standard":
raise NexusError(f"Unknown qualifier {qualifier} in line {opts}")
if opts.next_nonwhitespace() != separator:
raise NexusError("Formatting error in line: %s " % rest)
raise NexusError(f"Formatting error in line: {rest} ")
return name
def _parse_list(self, options_buffer, set_type):
@ -1403,7 +1401,7 @@ class Nexus:
return self.charsets[identifier]
else:
raise NexusError(
"Unknown character identifier: %s" % identifier
f"Unknown character identifier: {identifier}"
) from None
else:
if n <= self.nchar:
@ -1424,7 +1422,7 @@ class Nexus:
return self.taxsets[identifier]
else:
raise NexusError(
"Unknown taxon identifier: %s" % identifier
f"Unknown taxon identifier: {identifier}"
) from None
else:
if n > 0 and n <= self.ntax:
@ -1435,7 +1433,7 @@ class Nexus:
% (identifier, self.ntax)
)
else:
raise NexusError("Unknown set specification: %s." % set_type)
raise NexusError(f"Unknown set specification: {set_type}.")
def _stateset(self, options):
# Not implemented
@ -1562,7 +1560,7 @@ class Nexus:
)
if interleave_by_partition:
if interleave_by_partition not in self.charpartitions:
raise NexusError("Unknown partition: %r" % interleave_by_partition)
raise NexusError(f"Unknown partition: {interleave_by_partition!r}")
else:
partition = self.charpartitions[interleave_by_partition]
# we need to sort the partition names by starting position
@ -1726,7 +1724,7 @@ class Nexus:
for n, s in self.taxsets.items():
tset = [safename(t, mrbayes=mrbayes) for t in s if t not in delete]
if tset:
setsb.append("taxset %s = %s" % (safename(n), " ".join(tset)))
setsb.append(f"taxset {safename(n)} = {' '.join(tset)}")
for n, p in self.charpartitions.items():
if not include_codons and n == CODONPOSITIONS:
continue
@ -1934,7 +1932,7 @@ class Nexus:
matrix = self.matrix
if [t for t in delete if not self._check_taxlabels(t)]:
raise NexusError(
"Unknown taxa: %s" % ", ".join(set(delete).difference(self.taxlabels))
f"Unknown taxa: {', '.join(set(delete).difference(self.taxlabels))}"
)
if exclude != []:
undelete = [t for t in self.taxlabels if t in matrix and t not in delete]
@ -2137,7 +2135,7 @@ else:
decommented = cnexus.scanfile(file_contents)
# check for unmatched parentheses
if decommented == "[" or decommented == "]":
raise NexusError("Unmatched %s" % decommented)
raise NexusError(f"Unmatched {decommented}")
# cnexus can't return lists, so in analogy we separate
# commandlines with chr(7) (a character that shouldn't be part of a
# nexus file under normal circumstances)

View File

@ -263,9 +263,9 @@ class Tree(Nodes.Chain):
"""
id = self.search_taxon(taxon)
if id is None:
raise TreeError("Taxon not found: %s" % taxon)
raise TreeError(f"Taxon not found: {taxon}")
elif id not in self.get_terminals():
raise TreeError("Not a terminal taxon: %s" % taxon)
raise TreeError(f"Not a terminal taxon: {taxon}")
else:
prev = self.unlink(id)
self.kill(id)
@ -604,17 +604,17 @@ class Tree(Nodes.Chain):
tx = n.data.taxon
if not tx:
tx = "-"
blength = "%0.2f" % n.data.branchlength
blength = f"{n.data.branchlength:0.2f}"
if blength is None:
blength = "-"
sum_blength = "-"
else:
sum_blength = "%0.2f" % self.sum_branchlength(node=i)
sum_blength = f"{self.sum_branchlength(node=i):0.2f}"
support = n.data.support
if support is None:
support = "-"
else:
support = "%0.2f" % support
support = f"{support:0.2f}"
comment = n.data.comment
if comment is None:
comment = "-"
@ -631,7 +631,7 @@ class Tree(Nodes.Chain):
)
)
print("\n".join("%3s %32s %15s %15s %8s %10s %8s %20s" % l for l in table))
print("\nRoot: %s" % self.root)
print(f"\nRoot: {self.root}")
def to_string(
self,
@ -660,25 +660,25 @@ class Tree(Nodes.Chain):
self.support_as_branchlengths
): # support as branchlengths (eg. PAUP), ignore actual branchlengths
if terminal: # terminal branches have 100% support
info_string = ":%1.2f" % self.max_support
info_string = f":{self.max_support:1.2f}"
elif data.support:
info_string = ":%1.2f" % (data.support)
info_string = f":{data.support:1.2f}"
else:
info_string = ":0.00"
elif self.branchlengths_only: # write only branchlengths, ignore support
info_string = ":%1.5f" % (data.branchlength)
info_string = f":{data.branchlength:1.5f}"
else: # write support and branchlengths (e.g. .con tree of mrbayes)
if terminal:
info_string = ":%1.5f" % (data.branchlength)
info_string = f":{data.branchlength:1.5f}"
else:
if (
data.branchlength is not None and data.support is not None
): # we have blen and support
info_string = f"{data.support:1.2f}:{data.branchlength:1.5f}"
elif data.branchlength is not None: # we have only blen
info_string = "0.00000:%1.5f" % (data.branchlength)
info_string = f"0.00000:{data.branchlength:1.5f}"
elif data.support is not None: # we have only support
info_string = "%1.2f:0.00000" % (data.support)
info_string = f"{data.support:1.2f}:0.00000"
else:
info_string = "0.00:0.00000"
if not ignore_comments:
@ -713,10 +713,7 @@ class Tree(Nodes.Chain):
else:
succnodes = ladderize_nodes(self.node(node).succ, ladderize=ladderize)
subtrees = [newickize(sn, ladderize=ladderize) for sn in succnodes]
return "(%s)%s" % (
",".join(subtrees),
make_info_string(self.node(node).data),
)
return f"({','.join(subtrees)}){make_info_string(self.node(node).data)}"
treeline = ["tree"]
if self.name:
@ -725,12 +722,12 @@ class Tree(Nodes.Chain):
treeline.append("a_tree")
treeline.append("=")
if self.weight != 1:
treeline.append("[&W%s]" % str(round(float(self.weight), 3)))
treeline.append(f"[&W{str(round(float(self.weight), 3))}]")
if self.rooted:
treeline.append("[&R]")
succnodes = ladderize_nodes(self.node(self.root).succ)
subtrees = [newickize(sn, ladderize=ladderize) for sn in succnodes]
treeline.append("(%s)" % ",".join(subtrees))
treeline.append(f"({','.join(subtrees)})")
if plain_newick:
return treeline[-1]
else:
@ -854,9 +851,7 @@ class Tree(Nodes.Chain):
i for i in self.all_ids() if self.node(i).prev is None and i != self.root
]
if len(oldroot) > 1:
raise TreeError(
"Isolated nodes in tree description: %s" % ",".join(oldroot)
)
raise TreeError(f"Isolated nodes in tree description: {','.join(oldroot)}")
elif len(oldroot) == 1:
self.kill(oldroot[0])
return self.root
@ -934,7 +929,7 @@ def consensus(trees, threshold=0.5, outgroup=None):
for c in delclades:
del clades[c]
# create a tree with a root node
consensus = Tree(name="consensus_%2.1f" % float(threshold), data=dataclass)
consensus = Tree(name=f"consensus_{float(threshold):2.1f}", data=dataclass)
# each clade needs a node in the new tree, add them as isolated nodes
for c, s in clades.items():
node = Nodes.Node(data=dataclass())

View File

@ -38,7 +38,7 @@ class Graph:
values = sorted(
(x, self._edge_map[(key, x)]) for x in list(self._adjacency_list[key])
)
s += "(%r: %s)" % (key, ",".join(repr(v) for v in values))
s += f"({key!r}: {','.join(repr(v) for v in values)})"
return s + ">"
def __str__(self):

View File

@ -35,7 +35,7 @@ class MultiGraph:
s = "<MultiGraph: "
for key in sorted(self._adjacency_list):
values = sorted(self._adjacency_list[key])
s += "(%r: %s)" % (key, ",".join(repr(v) for v in values))
s += f"({key!r}: {','.join(repr(v) for v in values)})"
return s + ">"
def __str__(self):

View File

@ -119,7 +119,7 @@ def _check_bases(seq_string):
seq_string = seq_string.replace(c, "")
# Check only allowed IUPAC letters
if not set(seq_string).issubset(set("ABCDGHKMNRSTVWY")):
raise TypeError("Invalid character found in %r" % seq_string)
raise TypeError(f"Invalid character found in {seq_string!r}")
return " " + seq_string
@ -177,7 +177,7 @@ class FormattedSeq:
self.linear = seq.linear
self.klass = seq.klass
else:
raise TypeError("expected Seq or MutableSeq, got %s" % type(seq))
raise TypeError(f"expected Seq or MutableSeq, got {type(seq)}")
def __len__(self):
"""Return length of ``FormattedSeq``.
@ -270,7 +270,7 @@ class RestrictionType(type):
See below.
"""
if "-" in name:
raise ValueError("Problem with hyphen in %r as enzyme name" % name)
raise ValueError(f"Problem with hyphen in {name!r} as enzyme name")
# 2011/11/26 - Nobody knows what this call was supposed to accomplish,
# but all unit tests seem to pass without it.
# super().__init__(cls, name, bases, dct)
@ -283,7 +283,7 @@ class RestrictionType(type):
pass
except Exception:
raise ValueError(
"Problem with regular expression, re.compiled(%r)" % cls.compsite
f"Problem with regular expression, re.compiled({cls.compsite!r})"
) from None
def __add__(cls, other):
@ -352,7 +352,7 @@ class RestrictionType(type):
Used with eval or exec will instantiate the enzyme.
"""
return "%s" % cls.__name__
return f"{cls.__name__}"
def __len__(cls):
"""Return length of recognition site of enzyme as int."""
@ -431,7 +431,7 @@ class RestrictionType(type):
True
"""
if not isinstance(other, RestrictionType):
raise TypeError("expected RestrictionType, got %s instead" % type(other))
raise TypeError(f"expected RestrictionType, got {type(other)} instead")
return cls._mod1(other)
def __ge__(cls, other):
@ -1027,7 +1027,7 @@ class Unknown(AbstractCut):
If linear is False, the sequence is considered to be circular and the
output will be modified accordingly.
"""
raise NotImplementedError("%s restriction is unknown." % cls.__name__)
raise NotImplementedError(f"{cls.__name__} restriction is unknown.")
catalyze = catalyse
@ -1958,7 +1958,7 @@ class NotDefined(AbstractCut):
>>>
"""
return "? %s ?" % cls.site
return f"? {cls.site} ?"
class Commercially_available(AbstractCut):
@ -2062,7 +2062,7 @@ class RestrictionBatch(set):
def __repr__(self):
"""Represent ``RestrictionBatch`` class as a string for debugging."""
return "RestrictionBatch(%s)" % self.elements()
return f"RestrictionBatch({self.elements()})"
def __contains__(self, other):
"""Implement ``in`` for ``RestrictionBatch``."""
@ -2109,7 +2109,7 @@ class RestrictionBatch(set):
self.add(e)
return e
else:
raise ValueError("enzyme %s is not in RestrictionBatch" % e.__name__)
raise ValueError(f"enzyme {e.__name__} is not in RestrictionBatch")
def lambdasplit(self, func):
"""Filter enzymes in batch with supplied function.
@ -2198,7 +2198,7 @@ class RestrictionBatch(set):
return eval(y)
except (NameError, SyntaxError):
pass
raise ValueError("%s is not a RestrictionType" % y.__class__)
raise ValueError(f"{y.__class__} is not a RestrictionType")
def is_restriction(self, y):
"""Return if enzyme (name) is a known enzyme.
@ -2293,9 +2293,7 @@ class RestrictionBatch(set):
self.already_mapped = str(dna), dna.linear
self.mapping = {x: x.search(dna) for x in self}
return self.mapping
raise TypeError(
"Expected Seq or MutableSeq instance, got %s instead" % type(dna)
)
raise TypeError(f"Expected Seq or MutableSeq instance, got {type(dna)} instead")
###############################################################################
@ -2351,9 +2349,9 @@ class Analysis(RestrictionBatch, PrintFormat):
search to only part of the sequence given to analyse.
"""
if not isinstance(start, int):
raise TypeError("expected int, got %s instead" % type(start))
raise TypeError(f"expected int, got {type(start)} instead")
if not isinstance(end, int):
raise TypeError("expected int, got %s instead" % type(end))
raise TypeError(f"expected int, got {type(end)} instead")
if start < 1: # Looks like this tries to do python list like indexing
start += len(self.sequence)
if end < 1:
@ -2427,10 +2425,10 @@ class Analysis(RestrictionBatch, PrintFormat):
setattr(self, k, v)
elif k in ("Cmodulo", "PrefWidth"):
raise AttributeError(
"To change %s, change NameWidth and/or ConsoleWidth" % k
f"To change {k}, change NameWidth and/or ConsoleWidth"
)
else:
raise AttributeError("Analysis has no attribute %s" % k)
raise AttributeError(f"Analysis has no attribute {k}")
def full(self, linear=True):
"""Perform analysis with all enzymes of batch and return all results.
@ -2491,7 +2489,7 @@ class Analysis(RestrictionBatch, PrintFormat):
"""Return only results from enzymes which names are listed."""
for i, enzyme in enumerate(names):
if enzyme not in AllEnzymes:
warnings.warn("no data for the enzyme: %s" % enzyme, BiopythonWarning)
warnings.warn(f"no data for the enzyme: {enzyme}", BiopythonWarning)
del names[i]
if not dct:
return RestrictionBatch(names).search(self.sequence, self.linear)

View File

@ -50,7 +50,7 @@ class Record:
line = line.rstrip() # no trailing whitespace
columns = line.split("\t") # separate the tab-delineated cols
if len(columns) != 6:
raise ValueError("I don't understand the format of %s" % line)
raise ValueError(f"I don't understand the format of {line}")
self.sid, pdbid, residues, self.sccs, self.sunid, hierarchy = columns
self.residues = Residues.Residues(residues)

View File

@ -55,7 +55,7 @@ class Record:
line = line.rstrip() # no trailing whitespace
columns = line.split("\t") # separate the tab-delineated cols
if len(columns) != 5:
raise ValueError("I don't understand the format of %s" % line)
raise ValueError(f"I don't understand the format of {line}")
sunid, self.nodetype, self.sccs, self.name, self.description = columns
if self.name == "-":

View File

@ -49,7 +49,7 @@ class Record:
line = line.rstrip() # no trailing whitespace
columns = line.split("\t") # separate the tab-delineated cols
if len(columns) != 4:
raise ValueError("I don't understand the format of %s" % line)
raise ValueError(f"I don't understand the format of {line}")
self.sid, pdbid, res, self.hierarchy = columns
self.residues = Residues(res)
self.residues.pdbid = pdbid

View File

@ -52,7 +52,7 @@ class Record:
line = line.rstrip() # no trailing whitespace
columns = line.split("\t") # separate the tab-delineated cols
if len(columns) != 3:
raise ValueError("I don't understand the format of %s" % line)
raise ValueError(f"I don't understand the format of {line}")
sunid, parent, children = columns

View File

@ -56,15 +56,15 @@ class Residues:
for l in str.split(","):
m = _fragment_re.match(l)
if m is None:
raise ValueError("I don't understand the format of %s" % l)
raise ValueError(f"I don't understand the format of {l}")
chain, start, end, postfix = m.groups()
if postfix != "":
raise ValueError("I don't understand the format of %s" % l)
raise ValueError(f"I don't understand the format of {l}")
if chain:
if chain[-1] != ":":
raise ValueError("I don't understand the chain in %s" % l)
raise ValueError(f"I don't understand the chain in {l}")
chain = chain[:-1] # chop off the ':'
else:
chain = ""
@ -84,7 +84,7 @@ class Residues:
for chain, start, end in self.fragments:
s = []
if chain:
s.append("%s:" % chain)
s.append(f"{chain}:")
if start:
s.append(f"{start}-{end}")
strs.append("".join(s))

View File

@ -157,7 +157,7 @@ def parse_domain(term):
def _open_scop_file(scop_dir_path, version, filetype):
filename = "dir.%s.scop.txt_%s" % (filetype, version)
filename = f"dir.{filetype}.scop.txt_{version}"
handle = open(os.path.join(scop_dir_path, filename))
return handle
@ -499,7 +499,7 @@ class Scop:
for p in self._sunidDict.values():
for c in p.children:
cur.execute("INSERT INTO hie VALUES (%s,%s)" % (p.sunid, c.sunid))
cur.execute(f"INSERT INTO hie VALUES ({p.sunid},{c.sunid})")
def write_cla_sql(self, handle):
"""Write CLA data to SQL database."""
@ -770,8 +770,8 @@ class Astral:
raise RuntimeError("must provide dir_path and version")
self.version = version
self.path = os.path.join(dir_path, "scopseq-%s" % version)
astral_file = "astral-scopdom-seqres-all-%s.fa" % self.version
self.path = os.path.join(dir_path, f"scopseq-{version}")
astral_file = f"astral-scopdom-seqres-all-{self.version}.fa"
astral_file = os.path.join(self.path, astral_file)
if astral_file:
@ -814,7 +814,7 @@ class Astral:
raise RuntimeError("No scopseq directory specified")
file_prefix = "astral-scopdom-seqres-sel-gs"
filename = "%s-bib-%s-%s.id" % (file_prefix, id, self.version)
filename = f"{file_prefix}-bib-{id}-{self.version}.id"
filename = os.path.join(self.path, filename)
self.IdDatasets[id] = self.getAstralDomainsFromFile(filename)
return self.IdDatasets[id]
@ -926,7 +926,7 @@ def search(
dir=None,
loc=None,
cgi="http://scop.mrc-lmb.cam.ac.uk/legacy/search.cgi",
**keywds
**keywds,
):
"""Access SCOP search and return a handle to the results.

View File

@ -85,7 +85,7 @@ def parse(handle):
elif key in ("DE", "SY", "GO", "HI", "WW"):
record[key].append(value)
else:
print("Ignoring: %s" % line.strip())
print(f"Ignoring: {line.strip()}")
# Read the footer and throw it away
for line in handle:
pass

View File

@ -371,7 +371,7 @@ def _read(handle):
_read_ft(record, line)
elif key == "SQ":
cols = value.split()
assert len(cols) == 7, "I don't understand SQ line %s" % line
assert len(cols) == 7, f"I don't understand SQ line {line}"
# Do more checking here?
record.seqinfo = int(cols[1]), int(cols[3]), cols[5]
elif key == " ":
@ -408,7 +408,7 @@ def _read(handle):
# **HA SAM; Annotated by PicoHamap 1.88; MF_01138.1; 09-NOV-2003.
pass
else:
raise SwissProtParserError("Unknown keyword '%s' found" % key, line=line)
raise SwissProtParserError(f"Unknown keyword '{key}' found", line=line)
if record:
raise ValueError("Unexpected end of stream.")
@ -435,13 +435,13 @@ def _read_id(record, line):
# check if the data class is one of the allowed values
allowed = ("STANDARD", "PRELIMINARY", "IPI", "Reviewed", "Unreviewed")
if record.data_class not in allowed:
message = "Unrecognized data class '%s'" % record.data_class
message = f"Unrecognized data class '{record.data_class}'"
raise SwissProtParserError(message, line=line)
# molecule_type should be 'PRT' for PRoTein
# Note that has been removed in recent releases (set to None)
if record.molecule_type not in (None, "PRT"):
message = "Unrecognized molecule type '%s'" % record.molecule_type
message = f"Unrecognized molecule type '{record.molecule_type}'"
raise SwissProtParserError(message, line=line)
@ -474,7 +474,7 @@ def _read_dt(record, line):
for index in range(len(uprcols)):
if "REL." in uprcols[index]:
rel_index = index
assert rel_index >= 0, "Could not find Rel. in DT line: %s" % line
assert rel_index >= 0, f"Could not find Rel. in DT line: {line}"
version_index = rel_index + 1
# get the version information
str_version = cols[version_index].rstrip(",")
@ -565,13 +565,13 @@ def _read_ox(record, line):
ids = line[5:].rstrip().rstrip(";")
else:
descr, ids = line[5:].rstrip().rstrip(";").split("=")
assert descr == "NCBI_TaxID", "Unexpected taxonomy type %s" % descr
assert descr == "NCBI_TaxID", f"Unexpected taxonomy type {descr}"
record.taxonomy_id.extend(ids.split(", "))
def _read_oh(record, line):
# Line type OH (Organism Host) for viral hosts
assert line[5:].startswith("NCBI_TaxID="), "Unexpected %s" % line
assert line[5:].startswith("NCBI_TaxID="), f"Unexpected {line}"
line = line[16:].rstrip()
assert line[-1] == "." and line.count(";") == 1, line
taxid, name = line[:-1].split(";")
@ -586,15 +586,13 @@ def _read_rn(reference, rn):
# RN [1] {ECO:0000313|EMBL:AEX14553.1}
words = rn.split(None, 1)
number = words[0]
assert number.startswith("[") and number.endswith("]"), (
"Missing brackets %s" % number
)
assert number.startswith("[") and number.endswith("]"), f"Missing brackets {number}"
reference.number = int(number[1:-1])
if len(words) > 1:
evidence = words[1]
assert evidence.startswith("{") and evidence.endswith("}"), (
"Missing braces %s" % evidence
)
assert evidence.startswith("{") and evidence.endswith(
"}"
), f"Missing braces {evidence}"
reference.evidence = evidence[1:-1].split("|")
@ -648,7 +646,7 @@ def _read_rx(reference, value):
if len(x) != 2 or x == ("DOI", "DOI"):
warn = True
break
assert len(x) == 2, "I don't understand RX line %s" % value
assert len(x) == 2, f"I don't understand RX line {value}"
reference.references.append((x[0], x[1].rstrip(";")))
# otherwise we assume we have the type 'RX MEDLINE; 85132727.'
else:
@ -662,7 +660,7 @@ def _read_rx(reference, value):
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Possibly corrupt RX line %r" % value, BiopythonParserWarning)
warnings.warn(f"Possibly corrupt RX line {value!r}", BiopythonParserWarning)
def _read_cc(record, line):

View File

@ -63,11 +63,11 @@ def _get_entry_dbs():
def _get_entry_fields(db):
return _get_fields(_BASE_URL + "/entry/%s?fields" % db)
return _get_fields(_BASE_URL + f"/entry/{db}?fields")
def _get_entry_formats(db):
return _get_fields(_BASE_URL + "/entry/%s?formats" % db)
return _get_fields(_BASE_URL + f"/entry/{db}?formats")
def _get_convert_formats():
@ -106,7 +106,7 @@ def entry(db, id, format=None, field=None):
_entry_db_names = _get_entry_dbs()
if db not in _entry_db_names:
raise ValueError(
"TogoWS entry fetch does not officially support database '%s'." % db
f"TogoWS entry fetch does not officially support database '{db}'."
)
if field:
try:
@ -179,7 +179,7 @@ def search_count(db, query):
data = handle.read()
handle.close()
if not data:
raise ValueError("TogoWS returned no data from URL %s" % url)
raise ValueError(f"TogoWS returned no data from URL {url}")
try:
return int(data.strip())
except ValueError:
@ -226,7 +226,7 @@ def search_iter(db, query, limit=None, batch=100):
raise RuntimeError("Same search results for previous offset")
for identifier in ids:
if identifier in prev_ids:
raise RuntimeError("Result %s was in previous batch" % identifier)
raise RuntimeError(f"Result {identifier} was in previous batch")
yield identifier
offset += batch
remain -= batch
@ -283,13 +283,13 @@ def search(db, query, offset=None, limit=None, format=None):
offset = int(offset)
except ValueError:
raise ValueError(
"Offset should be an integer (at least one), not %r" % offset
f"Offset should be an integer (at least one), not {offset!r}"
) from None
try:
limit = int(limit)
except ValueError:
raise ValueError(
"Limit should be an integer (at least one), not %r" % limit
f"Limit should be an integer (at least one), not {limit!r}"
) from None
if offset <= 0:
raise ValueError("Offset should be at least one, not %i" % offset)
@ -323,7 +323,7 @@ def convert(data, in_format, out_format):
_convert_formats = _get_convert_formats()
if [in_format, out_format] not in _convert_formats:
msg = "\n".join("%s -> %s" % tuple(pair) for pair in _convert_formats)
raise ValueError("Unsupported conversion. Choose from:\n%s" % msg)
raise ValueError(f"Unsupported conversion. Choose from:\n{msg}")
url = _BASE_URL + f"/convert/{in_format}.{out_format}"
# TODO - Should we just accept a string not a handle? What about a filename?
try:

View File

@ -250,12 +250,7 @@ class Record:
def __repr__(self):
"""Represent the UniGene Record object as a string for debugging."""
return "<%s> %s %s %s" % (
self.__class__.__name__,
self.ID,
self.symbol,
self.title,
)
return f"<{self.__class__.__name__}> {self.ID} {self.symbol} {self.title}"
def parse(handle):
@ -306,7 +301,7 @@ def _read(handle):
elif value == "NO":
record.homol = True
else:
raise ValueError("Cannot parse HOMOL line %s" % line)
raise ValueError(f"Cannot parse HOMOL line {line}")
elif tag == "EXPRESS":
record.express = [word.strip() for word in value.split("|")]
elif tag == "RESTR_EXPR":
@ -335,6 +330,6 @@ def _read(handle):
)
return record
else:
raise ValueError("Unknown tag %s" % tag)
raise ValueError(f"Unknown tag {tag}")
if record:
raise ValueError("Unexpected end of stream.")

View File

@ -72,7 +72,7 @@ def align(
):
"""Run an alignment. Returns a filehandle."""
if not pair or len(pair) != 2:
raise ValueError("Expected pair of filename, not %r" % pair)
raise ValueError(f"Expected pair of filename, not {pair!r}")
output_file = tempfile.NamedTemporaryFile(mode="r")
input_files = (
@ -103,7 +103,7 @@ def align(
)
if debug:
sys.stderr.write("%s\n" % cmdline_str)
sys.stderr.write(f"{cmdline_str}\n")
status = os.system(cmdline_str) >> 8
@ -113,7 +113,7 @@ def align(
sys.stderr.write("INFO trying again with the linear model\n")
return align(cmdline, pair, 0, force_type, dry_run, quiet, debug)
else:
raise OSError("%s returned %s" % (" ".join(cmdline), status))
raise OSError(f"{' '.join(cmdline)} returned {status}")
return output_file

View File

@ -85,14 +85,14 @@ class Statistics:
def __init__(self, filename, match, mismatch, gap, extension):
"""Initialize the class."""
self.matches = _fgrep_count('"SEQUENCE" %s' % match, filename)
self.mismatches = _fgrep_count('"SEQUENCE" %s' % mismatch, filename)
self.gaps = _fgrep_count('"INSERT" %s' % gap, filename)
self.matches = _fgrep_count(f'"SEQUENCE" {match}', filename)
self.mismatches = _fgrep_count(f'"SEQUENCE" {mismatch}', filename)
self.gaps = _fgrep_count(f'"INSERT" {gap}', filename)
if gap == extension:
self.extensions = 0
else:
self.extensions = _fgrep_count('"INSERT" %s' % extension, filename)
self.extensions = _fgrep_count(f'"INSERT" {extension}', filename)
self.score = (
match * self.matches
@ -158,8 +158,8 @@ def main():
for attr in ("matches", "mismatches", "gaps", "extensions")
)
)
print("identity_fraction: %s" % stats.identity_fraction())
print("coords: %s" % stats.coords)
print(f"identity_fraction: {stats.identity_fraction()}")
print(f"coords: {stats.coords}")
def _test(*args, **keywds):

View File

@ -63,7 +63,7 @@ class AlignmentColumn(list):
def __repr__(self):
"""Represent the AlignmentColumn object as a string for debugging."""
return "%s(%r, %r)" % (self.kind, self[0], self[1])
return f"{self.kind}({self[0]!r}, {self[1]!r})"
def append(self, column_unit):
"""Add a unit to the Column."""

View File

@ -120,7 +120,7 @@ def build(
for record in nucl_seqs:
key = record.id
if key in d:
raise ValueError("Duplicate key '%s'" % key)
raise ValueError(f"Duplicate key '{key}'")
d[key] = record
nucl_seqs = d
corr_method = 2
@ -156,7 +156,7 @@ def build(
try:
nucl_id = corr_dict[pro_rec.id]
except KeyError:
print("Protein record (%s) is not in corr_dict!" % pro_rec.id)
print(f"Protein record ({pro_rec.id}) is not in corr_dict!")
exit(1)
pro_nucl_pair.append((pro_rec, nucl_seqs[nucl_id]))

View File

@ -236,7 +236,7 @@ class CodonSeq(Seq):
def ungap(self, gap="-"):
"""Return a copy of the sequence without the gap character(s)."""
if len(gap) != 1 or not isinstance(gap, str):
raise ValueError("Unexpected gap character, %s" % repr(gap))
raise ValueError(f"Unexpected gap character, {repr(gap)}")
return CodonSeq(str(self).replace(gap, ""), rf_table=self.rf_table)
@classmethod

View File

@ -115,7 +115,7 @@ def write(plates, handle, format):
if not format:
raise ValueError("Format required (lower case string)")
if format != format.lower():
raise ValueError("Format string '%s' should be lower case" % format)
raise ValueError(f"Format string '{format}' should be lower case")
if isinstance(plates, phen_micro.PlateRecord):
plates = [plates]
@ -126,7 +126,7 @@ def write(plates, handle, format):
writer_class = _FormatToWriter[format]
count = writer_class(plates).write(fp)
else:
raise ValueError("Unknown format '%s'" % format)
raise ValueError(f"Unknown format '{format}'")
if not isinstance(count, int):
raise TypeError(
@ -166,7 +166,7 @@ def parse(handle, format):
if not format:
raise ValueError("Format required (lower case string)")
if format != format.lower():
raise ValueError("Format string '%s' should be lower case" % format)
raise ValueError(f"Format string '{format}' should be lower case")
with as_handle(handle) as fp:
# Map the file format to a sequence iterator:
@ -174,7 +174,7 @@ def parse(handle, format):
iterator_generator = _FormatToIterator[format]
i = iterator_generator(fp)
else:
raise ValueError("Unknown format '%s'" % format)
raise ValueError(f"Unknown format '{format}'")
yield from i

View File

@ -182,7 +182,7 @@ class PlateRecord:
# Value should be of WellRecord type
if not isinstance(obj, WellRecord):
raise ValueError(
"A WellRecord type object is needed as value (got %s)" % type(obj)
f"A WellRecord type object is needed as value (got {type(obj)})"
)
def __getitem__(self, index):
@ -291,7 +291,7 @@ class PlateRecord:
try:
return self._wells[index]
except KeyError:
raise KeyError("Well %s not found!" % index)
raise KeyError(f"Well {index} not found!")
# Integer index
elif isinstance(index, int):
@ -567,7 +567,7 @@ class PlateRecord:
"""
lines = []
if self.id:
lines.append("Plate ID: %s" % self.id)
lines.append(f"Plate ID: {self.id}")
lines.append("Well: %i" % len(self))
# Here we assume that all well ID start with a char
lines.append("Rows: %d" % len({x.id[0] for x in self}))
@ -820,9 +820,9 @@ class WellRecord:
"""
lines = []
if self.plate and self.plate.id:
lines.append("Plate ID: %s" % self.plate.id)
lines.append(f"Plate ID: {self.plate.id}")
if self.id:
lines.append("Well ID: %s" % self.id)
lines.append(f"Well ID: {self.id}")
lines.append("Time points: %i" % len(self))
lines.append("Minum signal %.2f at time %.2f" % min(self, key=lambda x: x[1]))
lines.append("Maximum signal %.2f at time %.2f" % max(self, key=lambda x: x[1]))
@ -876,7 +876,7 @@ class WellRecord:
return
for sigmoid_func in function:
if sigmoid_func not in avail_func:
raise ValueError("Fitting function %r not supported" % sigmoid_func)
raise ValueError(f"Fitting function {sigmoid_func!r} not supported")
# Parameters that depend on scipy curve_fit
from .pm_fitting import fit, get_area
@ -935,7 +935,7 @@ def JsonIterator(handle):
_platesPrefixMammalian
):
warnings.warn(
"Non-standard plate ID found (%s)" % plateID, BiopythonParserWarning
f"Non-standard plate ID found ({plateID})", BiopythonParserWarning
)
else:
# Simplify the plates IDs, removing letters, as opm does
@ -953,7 +953,7 @@ def JsonIterator(handle):
# No luck
if len(pID) == 0:
warnings.warn(
"Non-standard plate ID found (%s)" % plateID, BiopythonParserWarning
f"Non-standard plate ID found ({plateID})", BiopythonParserWarning
)
elif int(pID) < 0:
warnings.warn(
@ -1037,7 +1037,7 @@ def CsvIterator(handle):
_platesPrefixMammalian
):
warnings.warn(
"Non-standard plate ID found (%s)" % plateID, BiopythonParserWarning
f"Non-standard plate ID found ({plateID})", BiopythonParserWarning
)
else:
# Simplify the plates IDs, removing letters, as opm does
@ -1055,7 +1055,7 @@ def CsvIterator(handle):
# No luck
if len(pID) == 0:
warnings.warn(
"Non-standard plate ID found (%s)" % plateID,
f"Non-standard plate ID found ({plateID})",
BiopythonParserWarning,
)
elif int(pID) < 0: