Misc spellings flagged by codespell (#4716)

Note the childs to children changes in Bio/Phylo/Consensus.py
only involved fixing internal variable names.

Not applying all the catergories ==> categories fixes
to the (deprecated) EMBOSS wrappers as some would be
functional changes.

Also, there are no more LaTeX files, so the tex extension has been dropped from the spell-checking file filter.
Peter Cock
2024-05-01 14:21:12 +01:00
committed by GitHub
parent f6c6c6a460
commit a47460e59a
44 changed files with 86 additions and 91 deletions

View File

@ -74,7 +74,7 @@ repos:
rev: v2.2.6
hooks:
- id: codespell
files: \.(rst|md|tex)$
files: \.(rst|md)$
args: [
--ignore-regex,
'(^|\W)([A-Z]{2,3})(\W|$)',
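As an aside on what that --ignore-regex argument does: the pattern skips two- and three-letter all-caps tokens (abbreviations such as DNA or PDB) so codespell does not flag them. A minimal Python check of the same regex, for illustration only:

    import re

    # The pattern passed to codespell above: a 2-3 letter all-caps token
    # bounded by non-word characters or the start/end of the text.
    ignore = re.compile(r"(^|\W)([A-Z]{2,3})(\W|$)")

    print(bool(ignore.search("the DNA strand")))       # True  -> skipped by codespell
    print(bool(ignore.search("the Alphabet module")))  # False -> still spell checked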

View File

@ -4290,7 +4290,7 @@ def parse(source, fmt):
- source - File or file-like object to read from, or filename as string.
- fmt - String describing the file format (case-insensitive).
Typical usage, opening a file to read in, and looping over the aligments:
Typical usage, opening a file to read in, and looping over the alignments:
>>> from Bio import Align
>>> filename = "Exonerate/exn_22_m_ner_cigar.exn"
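For readers skimming the diff, the doctest continues beyond this window; a minimal usage sketch of the pattern described above (the "exonerate" format name is an assumption based on the file being Exonerate output, not something shown in this hunk):

    from Bio import Align

    # Same file name as in the doctest above.
    alignments = Align.parse("Exonerate/exn_22_m_ner_cigar.exn", "exonerate")
    for alignment in alignments:
        print(alignment)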

View File

@ -571,7 +571,7 @@ class AlignmentIterator(interfaces.AlignmentIterator):
% (query_step, target_step)
)
elif operation == "N": # Non-equivalenced (unaligned) region
operation = "U" # 'N' is alread used for introns in SAM/BAM
operation = "U" # 'N' is already used for introns in SAM/BAM
if target_step > 0:
ts += target_step
coordinates[0, i + 1] = ts

View File

@ -8,7 +8,7 @@
# package.
"""Alphabets were previously used to declare sequence type and letters (OBSOLETE).
The design of Bio.Aphabet included a number of historic design choices
The design of Bio.Alphabet included a number of historic design choices
which, with the benefit of hindsight, were regretable. Bio.Alphabet was
therefore removed from Biopython in release 1.78. Instead, the molecule type is
included as an annotation on SeqRecords where appropriate.

View File

@ -291,7 +291,7 @@ nucleic_letters_3to1_extended = {
"8MG": "G", "8OG": "G", "8PY": "G", "8AA": "G", "85Y": "U", "8OS": "G",
}
# Solvent accesibility scales
# Solvent accessibility scales
residue_sasa_scales = {
# Ahmad: Ahmad et al. 2003 https://doi.org/10.1002/prot.10328
"Ahmad": {

View File

@ -358,9 +358,7 @@ class FDNADistCommandline(_EmbossCommandLine):
),
_Option(["-method", "method"], "sub. model [f,k,j,l,s]", is_required=True),
_Option(["-gamma", "gamma"], "gamma [g, i,n]"),
_Option(
["-ncategories", "ncategories"], "number of rate catergories (1-9)"
),
_Option(["-ncategories", "ncategories"], "number of rate categories (1-9)"),
_Option(["-rate", "rate"], "rate for each category"),
_Option(
["-categories", "categories"], "File of substitution rate categories"
@ -371,7 +369,7 @@ class FDNADistCommandline(_EmbossCommandLine):
),
_Option(["-invarfrac", "invarfrac"], "proportoin of invariant sites"),
_Option(["-ttratio", "ttratio"], "ts/tv ratio"),
_Option(["-freqsfrom", "freqsfrom"], "use emprical base freqs"),
_Option(["-freqsfrom", "freqsfrom"], "use empirical base freqs"),
_Option(["-basefreq", "basefreq"], "specify basefreqs"),
_Option(["-lower", "lower"], "lower triangle matrix (y/N)"),
]
@ -579,9 +577,7 @@ class FProtDistCommandline(_EmbossCommandLine):
filename=True,
is_required=True,
),
_Option(
["-ncategories", "ncategories"], "number of rate catergories (1-9)"
),
_Option(["-ncategories", "ncategories"], "number of rate categories (1-9)"),
_Option(["-rate", "rate"], "rate for each category"),
_Option(["-catergories", "catergories"], "file of rates"),
_Option(["-weights", "weights"], "weights file"),
@ -596,7 +592,7 @@ class FProtDistCommandline(_EmbossCommandLine):
),
_Option(["-aacateg", "aacateg"], "Choose the category to use [G,C,H]"),
_Option(["-whichcode", "whichcode"], "genetic code [c,m,v,f,y]"),
_Option(["-ease", "ease"], "Pob change catergory (float between -0 and 1)"),
_Option(["-ease", "ease"], "Pob change category (float between -0 and 1)"),
_Option(["-ttratio", "ttratio"], "Transition/transversion ratio (0-1)"),
_Option(
["-basefreq", "basefreq"], "DNA base frequencies (space separated list)"

View File

@ -325,7 +325,7 @@ class _SQLiteManySeqFilesDict(_IndexedSeqFileDict):
self._build_index()
def _load_index(self):
"""Call from __init__ to re-use an existing index (PRIVATE)."""
"""Call from __init__ to reuse an existing index (PRIVATE)."""
index_filename = self._index_filename
relative_path = self._relative_path
filenames = self._filenames

View File

@ -527,12 +527,12 @@ class HiddenMarkovModel:
return self._transition_pseudo
def get_blank_emissions(self):
"""Get the starting default emmissions for each sequence.
"""Get the starting default emissions for each sequence.
This returns a dictionary of the default emmissions for each
This returns a dictionary of the default emissions for each
letter. The dictionary is structured with keys as
(seq_letter, emmission_letter) and values as the starting number
of emmissions.
(seq_letter, emission_letter) and values as the starting number
of emissions.
"""
return self._emission_pseudo
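To make the docstring concrete, a dictionary of the kind it describes, keyed by (seq_letter, emission_letter) with starting counts as values, could be built like this (illustrative sketch only, not taken from Bio.HMM):

    state_letters = "12"
    emission_letters = "AB"
    blank_emissions = {
        (state, emission): 0
        for state in state_letters
        for emission in emission_letters
    }
    # {('1', 'A'): 0, ('1', 'B'): 0, ('2', 'A'): 0, ('2', 'B'): 0}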

View File

@ -80,13 +80,13 @@ class AbstractTrainer:
return total_likelihood
def estimate_params(self, transition_counts, emission_counts):
"""Get a maximum likelihood estimation of transition and emmission.
"""Get a maximum likelihood estimation of transition and emission.
Arguments:
- transition_counts -- A dictionary with the total number of counts
of transitions between two states.
- emissions_counts -- A dictionary with the total number of counts
of emmissions of a particular emission letter by a state letter.
of emissions of a particular emission letter by a state letter.
This then returns the maximum likelihood estimators for the
transitions and emissions, estimated by formulas 3.18 in
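The sentence is cut off by the diff window, but the estimator it refers to amounts to normalising each count by the total for its originating state. A minimal sketch of that idea (the helper name is made up; this is not the Bio.HMM.Trainer code):

    def ml_estimate(counts):
        """Normalise (from_state, symbol) counts into probabilities."""
        totals = {}
        for (from_state, _symbol), count in counts.items():
            totals[from_state] = totals.get(from_state, 0) + count
        return {key: count / totals[key[0]] for key, count in counts.items()}

    transition_counts = {("1", "1"): 3, ("1", "2"): 1}
    print(ml_estimate(transition_counts))  # {('1', '1'): 0.75, ('1', '2'): 0.25}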

View File

@ -16,7 +16,7 @@ Functions:
- classify Classify an observation into a class.
This module has been deprecated, please consider an alternative like scikit-learn
insead.
instead.
"""
import warnings

View File

@ -50,7 +50,7 @@ except ImportError:
def _contents(items):
"""Return a dictionary where the key is the item and the value is the probablity associated (PRIVATE)."""
"""Return a dictionary where the key is the item and the value is the probability associated (PRIVATE)."""
term = 1.0 / len(items)
counts = {}
for item in items:
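The loop body is cut off by the diff window; a hedged sketch of what a frequency helper like this plausibly computes (each occurrence contributes 1/len(items)), written independently rather than copied from the module:

    def contents(items):
        """Map each item to the fraction of the list it accounts for."""
        term = 1.0 / len(items)
        counts = {}
        for item in items:
            counts[item] = counts.get(item, 0) + term
        return counts

    print(contents(["a", "a", "b", "c"]))  # {'a': 0.5, 'b': 0.25, 'c': 0.25}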

View File

@ -143,7 +143,7 @@ def dssp_dict_from_pdb_file(in_file, DSSP="dssp", dssp_version="3.9.9"):
DSSP executable (argument to subprocess)
dssp_version : string
Version of DSSP excutable
Version of DSSP executable
Returns
-------
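The Returns section is cut off here; from memory (not from this hunk) the function yields a dictionary plus an ordered list of keys, and it needs a DSSP executable on the PATH. A hedged usage sketch with a hypothetical input file:

    from Bio.PDB.DSSP import dssp_dict_from_pdb_file

    # Hypothetical file name; requires the external dssp/mkdssp program.
    dssp_dict, dssp_keys = dssp_dict_from_pdb_file("example.pdb", DSSP="dssp")
    print(len(dssp_keys), "residues with DSSP records")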

View File

@ -203,8 +203,8 @@ def read_PIC(
"""Create Hedron on current (sbcic) Chain.internal_coord."""
ek = (akcache(a1), akcache(a2), akcache(a3))
atmNdx = AtomKey.fields.atm
accpt = IC_Residue.accept_atoms
if not all(ek[i].akl[atmNdx] in accpt for i in range(3)):
accept = IC_Residue.accept_atoms
if not all(ek[i].akl[atmNdx] in accept for i in range(3)):
return
hl12[ek] = float(l12)
ha[ek] = float(ang)
@ -295,8 +295,8 @@ def read_PIC(
akcache(a4),
)
atmNdx = AtomKey.fields.atm
accpt = IC_Residue.accept_atoms
if not all(ek[i].akl[atmNdx] in accpt for i in range(4)):
accept = IC_Residue.accept_atoms
if not all(ek[i].akl[atmNdx] in accept for i in range(4)):
return
dangle = float(dangle)
dangle = dangle if (dangle <= 180.0) else dangle - 360.0
@ -454,7 +454,7 @@ def read_PIC(
# rnext should be set
def ake_recurse(akList: List) -> List:
"""Bulid combinatorics of AtomKey lists."""
"""Build combinatorics of AtomKey lists."""
car = akList[0]
if len(akList) > 1:
retList = []
@ -1104,9 +1104,9 @@ def write_PIC(
hdr.upper(), (dd or ""), (pdbid or "")
)
)
nam = entity.header.get("name", None)
if nam:
fp.write("TITLE " + nam.upper() + "\n")
name = entity.header.get("name", None)
if name:
fp.write("TITLE " + name.upper() + "\n")
for mdl in entity:
write_PIC(
mdl,

View File

@ -134,8 +134,8 @@ def report_IC(
hdr = entity.header.get("head", None)
if hdr:
reportDict["hdr"] += 1
nam = entity.header.get("name", None)
if nam:
name = entity.header.get("name", None)
if name:
reportDict["hdr"] += 1
for mdl in entity:
reportDict = report_IC(mdl, reportDict)
@ -492,9 +492,9 @@ def write_PDB(
hdr.upper(), (dd or ""), (pdbid or "")
)
)
nam = entity.header.get("name", None)
if nam:
fp.write("TITLE " + nam.upper() + "\n")
name = entity.header.get("name", None)
if name:
fp.write("TITLE " + name.upper() + "\n")
io = PDBIO()
io.set_structure(entity)
io.save(fp, preserve_atom_numbering=True)

View File

@ -1347,7 +1347,7 @@ class IC_Chain:
"""
if np.any(self.hAtoms_needs_update):
# hedra inital coords
# hedra initial coords
# sar = supplementary angle radian: angles which add to 180
sar = np.deg2rad(180.0 - self.hedraAngle[self.hAtoms_needs_update]) # angle
@ -2058,7 +2058,7 @@ class IC_Chain:
def distplot_to_dh_arrays(
self, distplot: np.ndarray, dihedra_signs: np.ndarray
) -> None:
"""Load di/hedra distance arays from distplot.
"""Load di/hedra distance arrays from distplot.
Fill :class:`IC_Chain` arrays hedraL12, L23, L13 and dihedraL14
distance value arrays from input distplot, dihedra_signs array from
@ -2069,7 +2069,7 @@ class IC_Chain:
Call :meth:`atom_to_internal_coordinates` (or at least :meth:`init_edra`)
to generate a2ha_map and d2a_map before running this.
Explcitly removed from :meth:`.distance_to_internal_coordinates` so
Explicitly removed from :meth:`.distance_to_internal_coordinates` so
user may populate these chain di/hedra arrays by other
methods.
"""
@ -2116,7 +2116,7 @@ class IC_Chain:
:param bool resetAtoms: default True.
Mark all atoms in di/hedra and atomArray for updating by
:meth:`.internal_to_atom_coordinates`. Alternatvely set this to
:meth:`.internal_to_atom_coordinates`. Alternatively set this to
False and manipulate `atomArrayValid`, `dAtoms_needs_update` and
`hAtoms_needs_update` directly to reduce computation.
""" # noqa
@ -3727,12 +3727,12 @@ class IC_Residue:
protein chain definitions in :mod:`.ic_data` and :meth:`_create_edra`
(e.g. psi overlaps N-CA-C-O).
Te default overlap=True is probably what you want for:
The default overlap=True is probably what you want for:
`set_angle("chi1", val)`
The default is probably NOT what you want when processing all dihedrals
in a chain or residue (such as copying from another structure), as the
overlaping dihedra will likely be in the set as well.
overlapping dihedra will likely be in the set as well.
N.B. setting e.g. PRO chi2 is permitted without error or warning!
@ -3775,7 +3775,7 @@ class IC_Residue:
Changes a dihedral angle by a given delta, i.e.
new_angle = current_angle + delta
Values are adjusted so new_angle iwll be within +/-180.
Values are adjusted so new_angle will be within +/-180.
Changes overlapping dihedra as in :meth:`.set_angle`
@ -3911,7 +3911,7 @@ class Edron:
re_class: str
sequence of residue, atoms comprising di/hedron for statistics
cre_class: str
sequence of covalent radii classses comprising di/hedron for statistics
sequence of covalent radii classes comprising di/hedron for statistics
edron_re: compiled regex (Class Attribute)
A compiled regular expression matching string IDs for Hedron
and Dihedron objects

View File

@ -110,7 +110,7 @@ class RaxmlCommandline(AbstractCommandline):
e: Optimize model+branch lengths for given input tree under
GAMMA/GAMMAI only.
g: Compute per site log Likelihoods for one ore more trees
g: Compute per site log Likelihoods for one or more trees
passed via '-z' and write them to a file that can be read
by CONSEL.

View File

@ -257,9 +257,8 @@ def strict_consensus(trees):
if bs.contains(bitstr):
# remove old bitstring
del bitstr_clades[bs]
# update clade childs
new_childs = [child for child in c.clades if child not in clade_terms]
c.clades = new_childs
# update clade children
c.clades = [child for child in c.clades if child not in clade_terms]
# set current clade as child of c
c.clades.append(clade)
# update bitstring
@ -324,7 +323,7 @@ def majority_consensus(trees, cutoff=0):
# record its possible parent and child clades.
compatible = True
parent_bitstr = None
child_bitstrs = [] # multiple independent childs
child_bitstrs = [] # multiple independent children
for bs in bsckeys:
if not bs.iscompatible(bitstr):
compatible = False
@ -347,7 +346,7 @@ def majority_consensus(trees, cutoff=0):
if parent_bitstr:
# insert current clade; remove old bitstring
parent_clade = bitstr_clades.pop(parent_bitstr)
# update parent clade childs
# update parent clade children
parent_clade.clades = [
c for c in parent_clade.clades if c not in clade_terms
]
@ -457,15 +456,15 @@ def _sub_clade(clade, term_names):
for c in sub_clade.find_clades(terminal=False, order="preorder"):
if c == sub_clade.root:
continue
childs = set(c.find_clades(terminal=True)) & set(term_clades)
if childs:
children = set(c.find_clades(terminal=True)) & set(term_clades)
if children:
for tc in temp_clade.find_clades(terminal=False, order="preorder"):
tc_childs = set(tc.clades)
tc_new_clades = tc_childs - childs
if childs.issubset(tc_childs) and tc_new_clades:
tc_children = set(tc.clades)
tc_new_clades = tc_children - children
if children.issubset(tc_children) and tc_new_clades:
tc.clades = list(tc_new_clades)
child_clade = BaseTree.Clade()
child_clade.clades.extend(list(childs))
child_clade.clades.extend(list(children))
tc.clades.append(child_clade)
sub_clade = temp_clade
return sub_clade

View File

@ -323,7 +323,7 @@ class GenePopController:
):
"""Use Hardy-Weinberg test for heterozygote deficiency.
Returns a population iterator containing a dictionary wehre
Returns a population iterator containing a dictionary where
dictionary[locus]=(P-val, SE, Fis-WC, Fis-RH, steps).
Some loci have a None if the info is not available.

View File

@ -949,7 +949,7 @@ class Palindromic(AbstractCut):
@classmethod
def is_palindromic(cls):
"""Return if the enzyme has a palindromic recoginition site."""
"""Return if the enzyme has a palindromic recognition site."""
return True
@ -991,7 +991,7 @@ class NonPalindromic(AbstractCut):
@classmethod
def is_palindromic(cls):
"""Return if the enzyme has a palindromic recoginition site."""
"""Return if the enzyme has a palindromic recognition site."""
return False
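As a concrete illustration of this classmethod, two well-known enzymes from Bio.Restriction (chosen as examples here, not taken from the diff):

    from Bio.Restriction import EcoRI, FokI

    print(EcoRI.site, EcoRI.is_palindromic())  # GAATTC True
    print(FokI.site, FokI.is_palindromic())    # GGATG False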

View File

@ -1392,7 +1392,7 @@ class SimpleLocation(Location):
return f_seq
FeatureLocation = SimpleLocation # OBSOLETE; for backward compatability only.
FeatureLocation = SimpleLocation # OBSOLETE; for backward compatibility only.
class CompoundLocation(Location):

View File

@ -82,7 +82,7 @@ def PhdIterator(source: _TextIOSource) -> Iterator[SeqRecord]:
seq_record = SeqRecord(
phd_record.seq, id=name, name=name, description=phd_record.file_name
)
# Just re-use the comments dictionary as the SeqRecord's annotations
# Just reuse the comments dictionary as the SeqRecord's annotations
seq_record.annotations = phd_record.comments
seq_record.annotations["molecule_type"] = "DNA"
# And store the qualities and peak locations as per-letter-annotation

View File

@ -70,7 +70,7 @@ methods that allow it to be used as a dictionary.
# reserved - always zero for now
# packedDna - the DNA packed to two bits per base, represented as so:
# T - 00, C - 01, A - 10, G - 11. The first base is in the most
# significant 2-bit byte; the last base is in the least significan
# significant 2-bit byte; the last base is in the least significant
# 2 bits. For example, the sequence TCAG is represented as 00011011.
try:
import numpy as np
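The comment block above fully specifies the packing, so a short worked sketch can confirm the TCAG example (illustrative only, not the actual TwoBitIO code):

    # T=00, C=01, A=10, G=11; first base in the most significant 2 bits.
    CODES = {"T": 0b00, "C": 0b01, "A": 0b10, "G": 0b11}

    def pack_four(bases):
        """Pack exactly four bases into a single byte value."""
        value = 0
        for base in bases:
            value = (value << 2) | CODES[base]
        return value

    assert pack_four("TCAG") == 0b00011011  # matches the example in the comment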

View File

@ -293,7 +293,7 @@ names are also used in Bio.AlignIO and include the following:
- gb - An alias for "genbank", for consistency with NCBI Entrez Utilities
- gfa1 - Graphical Fragment Assemblyv versions 1.x. Only segment lines
are parsed and all linkage information is ignored.
- gfa2 - Graphical Fragement Assembly version 2.0. Only segment lines are
- gfa2 - Graphical Fragment Assembly version 2.0. Only segment lines are
parsed and all linkage information is ignored.
- ig - The IntelliGenetics file format, apparently the same as the
MASE alignment format.

View File

@ -24,7 +24,7 @@ General parameters for most Tm methods:
True). In general, whitespaces and non-base characters are removed and
characters are converted to uppercase. RNA will be backtranscribed.
- strict -- Do not allow base characters or neighbor duplex keys (e.g.
'AT/NA') that could not or not unambigiously be evaluated for the respective
'AT/NA') that could not or not unambiguously be evaluated for the respective
method (default=True). Note that W (= A or T) and S (= C or G) are not
ambiguous for Tm_Wallace and Tm_GC. If 'False', average values (if
applicable) will be used.
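Since this docstring covers the general Tm parameters, a brief hedged usage example of the two methods it names (the primer sequence is arbitrary):

    from Bio.SeqUtils import MeltingTemp as mt

    primer = "ACGTACGTACGT"
    print(mt.Tm_Wallace(primer))  # Wallace rule: 2*(A+T) + 4*(G+C)
    print(mt.Tm_GC(primer))       # GC-content based estimate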

View File

@ -663,12 +663,12 @@ class CodonAdaptationIndex(dict):
- seq_type: String specifying type of sequence provided.
Options are "DNA", "RNA", and "protein". Default is "DNA".
- strict: Determines whether an exception should be raised when
two codons are equally prefered for a given amino acid.
two codons are equally preferred for a given amino acid.
Returns:
Seq object with DNA encoding the same protein as the sequence argument,
but using only preferred codons as defined by the codon adaptation index.
If multiple codons are equally preferred, a warning is issued
and one codon is chosen for use in the optimzed sequence.
and one codon is chosen for use in the optimized sequence.
"""
try: # If seq record is provided, convert to sequence
sequence = sequence.seq
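The behaviour described, back-translating with only the preferred codon per amino acid, can be sketched generically; this is not the Bio.SeqUtils API, just an illustration with a made-up two-entry table:

    # Hypothetical preferred-codon table; a real one would come from a
    # codon adaptation index computed over a reference gene set.
    preferred = {"M": "ATG", "W": "TGG"}

    def back_translate(protein):
        """Encode a protein using only the preferred codon per residue."""
        return "".join(preferred[aa] for aa in protein)

    print(back_translate("MW"))  # ATGTGG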

View File

@ -544,7 +544,7 @@ def _read_dt(record, line):
version = 0
date = cols[0].rstrip(",")
# Re-use the historical property names, even though
# Reuse the historical property names, even though
# the meaning has changed slightly:
if "INTEGRATED" in uprline:
record.created = date, version

View File

@ -82,7 +82,7 @@ def entry(db, id, format=None, field=None):
Arguments:
- db - database (string), see list below.
- id - identier (string) or a list of identifiers (either as a list of
- id - identifier (string) or a list of identifiers (either as a list of
strings or a single string with comma separators).
- format - return data file format (string), options depend on the database
e.g. "xml", "json", "gff", "fasta", "ttl" (RDF Turtle)

View File

@ -796,7 +796,7 @@ class BgzfWriter:
"""Define a BGZFWriter object."""
def __init__(self, filename=None, mode="w", fileobj=None, compresslevel=6):
"""Initilize the class."""
"""Initialize the class."""
if filename and fileobj:
raise ValueError("Supply either filename or fileobj, not both")
# If an open file was passed, make sure it was opened in binary mode.
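The mutual-exclusion check shown just above can be exercised directly; a small hedged example:

    import io
    from Bio import bgzf

    try:
        # Passing both a filename and a file object trips the check above.
        bgzf.BgzfWriter("example.bgz", fileobj=io.BytesIO())
    except ValueError as err:
        print(err)  # Supply either filename or fileobj, not both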

View File

@ -27,7 +27,7 @@ Weighting Functions:
- equal_weight Every example is given a weight of 1.
This module has been deprecated, please consider an alternative like scikit-learn
insead.
instead.
"""
import warnings

View File

@ -71,7 +71,7 @@ class Motif(motifs.Motif):
return version
def __str__(self):
"""Return a string represention of the JASPAR profile.
"""Return a string representation of the JASPAR profile.
We choose to provide only the filled metadata information.
"""

View File

@ -101,7 +101,7 @@ class JASPAR5:
self.dbh = mdb.connect(host, user, password, name)
def __str__(self):
"""Return a string represention of the JASPAR5 DB connection."""
"""Return a string representation of the JASPAR5 DB connection."""
return rf"{self.user}\@{self.host}:{self.name}"
def fetch_motif_by_id(self, id):
@ -427,7 +427,7 @@ class JASPAR5:
elif attr == "comment":
motif.comment = val
else:
# TODO If we were to implement additional abitrary tags
# TODO If we were to implement additional arbitrary tags
# motif.tag(attr, val)
pass

View File

@ -546,7 +546,7 @@ class DatabaseLoader:
# we could verify that the Scientific Name etc in the database
# is the same and update it or print a warning if not...
if len(rows) != 1:
raise ValueError(f"Expected 1 reponse, got {len(rows)}")
raise ValueError(f"Expected 1 response, got {len(rows)}")
return rows[0]
# We have to record this.

View File

@ -6,7 +6,7 @@
import unittest
# Really do want "import *" to get all the test clases:
# Really do want "import *" to get all the test classes:
from common_BioSQL import * # noqa: F403
# Import these explicitly to avoid flake8 F405 below:

View File

@ -6,7 +6,7 @@
import unittest
# Really do want "import *" to get all the test clases:
# Really do want "import *" to get all the test classes:
from common_BioSQL import * # noqa: F403
# Import these explicitly to avoid flake8 F405 below:

View File

@ -6,7 +6,7 @@
import unittest
# Really do want "import *" to get all the test clases:
# Really do want "import *" to get all the test classes:
from common_BioSQL import * # noqa: F403
from common_BioSQL_online import * # noqa: F403

View File

@ -6,7 +6,7 @@
import unittest
# Really do want "import *" to get all the test clases:
# Really do want "import *" to get all the test classes:
from common_BioSQL import * # noqa: F403
# Import these explicitly to avoid flake8 F405

View File

@ -6,7 +6,7 @@
import unittest
# Really do want "import *" to get all the test clases:
# Really do want "import *" to get all the test classes:
from common_BioSQL import * # noqa: F403
from common_BioSQL_online import * # noqa: F403

View File

@ -53,7 +53,7 @@ if False:
class BackwardsCompatibilityTest(SeqRecordTestBaseClass):
def test_backwards_compatibility(self):
"""Check can re-use an old BioSQL SQLite3 database."""
"""Check can reuse an old BioSQL SQLite3 database."""
original_records = []
for record in SeqIO.parse("GenBank/cor6_6.gb", "gb"):
if record.annotations["molecule_type"] == "mRNA":

View File

@ -6,7 +6,7 @@
import unittest
# Really do want "import *" to get all the test clases:
# Really do want "import *" to get all the test classes:
from common_BioSQL import * # noqa: F403
from common_BioSQL_online import * # noqa: F403

View File

@ -124,7 +124,7 @@ def check_request_ids(testcase, params, expected):
:type testcase: unittest.TestCase
:param params: Parsed parameter dictionary returned by `deconstruct_request`.
:type params: dict
:param expected: Expected set of IDs, as colleciton of strings.
:param expected: Expected set of IDs, as collection of strings.
"""
testcase.assertEqual(len(params["id"]), 1)
ids_str = params["id"][0]

View File

@ -575,7 +575,7 @@ if sqlite3:
def test_correct_spliced_sequences_2(self):
"""Checking that spliced sequences are correct.
We get spliced alignements from following MAF blocks
We get spliced alignments from following MAF blocks
and check that the sequences are correct:
a score=19159.000000

View File

@ -16,7 +16,7 @@ with warnings.catch_warnings():
warnings.simplefilter("ignore", category=BiopythonDeprecationWarning)
from Bio.PopGen.GenePop.EasyController import EasyController
# Tests genepop related code for easy contorller. Note: this requires genepop
# Tests genepop related code for easy controller. Note: this requires genepop
# test_PopGen_GenePop_nodepend tests code that does not require genepop
found = False

View File

@ -2410,7 +2410,7 @@ class TestLoading(unittest.TestCase):
try:
m = substitution_matrices.load(name)
except Exception:
self.fail(f"Failed to load subsitution matrix '{name}'")
self.fail(f"Failed to load substitution matrix '{name}'")
def test_reading(self):
"""Confirm matrix reading works with filename or handle."""

View File

@ -128,9 +128,9 @@ class WriteMMTF(unittest.TestCase):
def test_write(self):
"""Test a simple structure object is written out correctly to MMTF."""
parser = MMCIFParser()
struc = parser.get_structure("1A8O", "PDB/1A8O.cif")
structure = parser.get_structure("1A8O", "PDB/1A8O.cif")
io = MMTFIO()
io.set_structure(struc)
io.set_structure(structure)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
@ -177,9 +177,9 @@ class WriteMMTF(unittest.TestCase):
def test_multi_model_write(self):
"""Test multiple models are written out correctly to MMTF."""
parser = PDBParser()
struc = parser.get_structure("1SSU_mod", "PDB/1SSU_mod.pdb")
structure = parser.get_structure("1SSU_mod", "PDB/1SSU_mod.pdb")
io = MMTFIO()
io.set_structure(struc)
io.set_structure(structure)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
@ -206,9 +206,9 @@ class WriteMMTF(unittest.TestCase):
def test_selection_write(self):
"""Test the use of a Select subclass when writing MMTF files."""
struc = MMTFParser.get_structure("PDB/4CUP.mmtf")
structure = MMTFParser.get_structure("PDB/4CUP.mmtf")
io = MMTFIO()
io.set_structure(struc)
io.set_structure(structure)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)