Mirror of https://github.com/biopython/biopython.git
Synced 2025-10-20 13:43:47 +08:00
PEP 585 typing rewrites etc. using pyupgrade 3.16.0
    $ pyupgrade --keep-percent-format --py39-plus \
          Bio/*.py Bio/*/*.py BioSQL/*.py

This didn't find any changes in tests, scripts, or docs. It was followed by removing a few now-redundant imports, and by a black reformat in one case.
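As an illustration (a hypothetical module, not code from this commit), this is the kind of PEP 585 rewrite that pyupgrade --py39-plus applies: typing.Dict/List/Tuple become the builtin generics dict/list/tuple, and ABCs such as Iterator move from typing to collections.abc. The typing imports left unused afterwards are the "now redundant imports" removed by hand.

    # before
    from typing import Dict, Iterator, List, Tuple

    def iter_pairs(items: List[str]) -> Iterator[Tuple[str, str]]:
        pairs: Dict[str, str] = dict(zip(items[::2], items[1::2]))
        return iter(pairs.items())

    # after pyupgrade --py39-plus (the typing import line is now unused)
    from collections.abc import Iterator

    def iter_pairs(items: list[str]) -> Iterator[tuple[str, str]]:
        pairs: dict[str, str] = dict(zip(items[::2], items[1::2]))
        return iter(pairs.items())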
@@ -4253,7 +4253,7 @@ formats = (
 )
 # fmt: on

-_modules: Dict[str, types.ModuleType] = {}
+_modules: dict[str, types.ModuleType] = {}


 def _load(fmt: str) -> types.ModuleType:
@@ -224,7 +224,7 @@ class MauveWriter(SequentialAlignmentWriter):
 class MauveIterator(AlignmentIterator):
     """Mauve xmfa alignment iterator."""

-    _ids: List[str] = []  # for caching IDs between __next__ calls
+    _ids: list[str] = []  # for caching IDs between __next__ calls

     def __next__(self):
         """Parse the next alignment from the handle."""
@@ -15,7 +15,7 @@ sequences as SeqRecord objects.
 """

 from typing import IO
-from typing import Iterator
+from collections.abc import Iterator
 from typing import Optional

 from Bio.Align import MultipleSeqAlignment
@@ -50,20 +50,20 @@ class TranslationError(Exception):
 class CodonTable:
     """A codon-table, or genetic code."""

-    forward_table: Dict[str, str] = {}  # only includes codons which actually code
-    back_table: Dict[str, str] = {}  # for back translations
-    start_codons: List[str] = []
-    stop_codons: List[str] = []
+    forward_table: dict[str, str] = {}  # only includes codons which actually code
+    back_table: dict[str, str] = {}  # for back translations
+    start_codons: list[str] = []
+    stop_codons: list[str] = []

     # Not always called from derived classes!
     def __init__(
         self,
         nucleotide_alphabet: Optional[str] = None,
         protein_alphabet: Optional[str] = None,
-        forward_table: Dict[str, str] = forward_table,
-        back_table: Dict[str, str] = back_table,
-        start_codons: List[str] = start_codons,
-        stop_codons: List[str] = stop_codons,
+        forward_table: dict[str, str] = forward_table,
+        back_table: dict[str, str] = back_table,
+        start_codons: list[str] = start_codons,
+        stop_codons: list[str] = stop_codons,
     ) -> None:
         """Initialize the class."""
         self.nucleotide_alphabet = nucleotide_alphabet
@@ -1168,7 +1168,7 @@ class GenBankScanner(InsdcScanner):
     RECORD_START = "LOCUS "
     HEADER_WIDTH = 12
     FEATURE_START_MARKERS = ["FEATURES Location/Qualifiers", "FEATURES"]
-    FEATURE_END_MARKERS: List[str] = []
+    FEATURE_END_MARKERS: list[str] = []
     FEATURE_QUALIFIER_INDENT = 21
     FEATURE_QUALIFIER_SPACER = " " * FEATURE_QUALIFIER_INDENT
     SEQUENCE_HEADERS = [
@@ -34,7 +34,7 @@ class Chain:

     def __init__(self) -> None:
         """Initialize a node chain."""
-        self.chain: Dict[int, "Node"] = {}
+        self.chain: dict[int, "Node"] = {}
         self.id = -1

     def _get_id(self) -> int:
@@ -42,7 +42,7 @@ class Chain:
         self.id += 1
         return self.id

-    def all_ids(self) -> List[int]:
+    def all_ids(self) -> list[int]:
         """Return a list of all node ids."""
         return list(self.chain.keys())

@@ -382,9 +382,7 @@ class DSSP(AbstractResiduePropertyMap):
         # calling 'dssp' will not work in some operating systems
         # (Debian distribution of DSSP includes a symlink for 'dssp' argument)
         try:
-            version_string = subprocess.check_output(
-                [dssp, "--version"], universal_newlines=True
-            )
+            version_string = subprocess.check_output([dssp, "--version"], text=True)
             dssp_version = re.search(r"\s*([\d.]+)", version_string).group(1)
             dssp_dict, dssp_keys = dssp_dict_from_pdb_file(
                 in_file, dssp, dssp_version
@@ -396,9 +394,7 @@ class DSSP(AbstractResiduePropertyMap):
                 dssp = "dssp"
             else:
                 raise
-            version_string = subprocess.check_output(
-                [dssp, "--version"], universal_newlines=True
-            )
+            version_string = subprocess.check_output([dssp, "--version"], text=True)
             dssp_version = re.search(r"\s*([\d.]+)", version_string).group(1)
             dssp_dict, dssp_keys = dssp_dict_from_pdb_file(
                 in_file, dssp, dssp_version
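The two DSSP hunks above also show pyupgrade's subprocess keyword rewrite: universal_newlines=True becomes the equivalent text=True alias that subprocess has accepted since Python 3.7 (the collapse onto a single line is presumably the black reformat mentioned in the commit message). A minimal sketch of the same rewrite, using a hypothetical command rather than Biopython code:

    import subprocess

    # before: pre-3.7 spelling, decodes output to str
    out = subprocess.check_output(["echo", "hello"], universal_newlines=True)

    # after pyupgrade: identical behaviour, newer keyword
    out = subprocess.check_output(["echo", "hello"], text=True)
    print(out.strip())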
@@ -42,8 +42,8 @@ class Entity(Generic[_Parent, _Child]):
     """

     parent: Optional[_Parent]
-    child_list: List[_Child]
-    child_dict: Dict[Any, _Child]
+    child_list: list[_Child]
+    child_dict: dict[Any, _Child]
     level: str

     def __init__(self, id):
@@ -412,7 +412,7 @@ class PDBList:

     def download_pdb_files(
         self,
-        pdb_codes: List[str],
+        pdb_codes: list[str],
         obsolete: bool = False,
         pdir: Optional[str] = None,
         file_format: Optional[str] = None,
@@ -458,7 +458,7 @@ class PDBList:
             pdb_codes,
         )

-    def get_all_assemblies(self, file_format: str = "") -> List[Tuple[str, str]]:
+    def get_all_assemblies(self, file_format: str = "") -> list[tuple[str, str]]:
         """Retrieve the list of PDB entries with an associated bio assembly.

         The requested list will be cached to avoid multiple calls to the server.
@@ -491,7 +491,7 @@ class PDBList:
         assemblies = json.loads(response.read().decode("utf-8"))["result_set"]

         # We transform the assemblies to match the format that they have historically been returned in.
-        def transform(assembly: dict) -> Tuple[str, str]:
+        def transform(assembly: dict) -> tuple[str, str]:
             split = assembly["identifier"].split("-")
             return split[0].lower(), split[-1]

@@ -24,7 +24,7 @@ from Bio.PDB.StructureBuilder import StructureBuilder


 def _parse_resolution_from(
-    tree: ElementTree, namespaces: Dict[str, str]
+    tree: ElementTree, namespaces: dict[str, str]
 ) -> Union[float, None]:
     for candidate in [
         "PDBx:refineCategory/PDBx:refine/PDBx:ls_d_res_high",
@@ -40,8 +40,8 @@ def _parse_resolution_from(


 def _parse_header_from(
-    tree: ElementTree, namespaces: Dict[str, str]
-) -> Dict[str, Union[str, float]]:
+    tree: ElementTree, namespaces: dict[str, str]
+) -> dict[str, Union[str, float]]:
     return {
         "name": tree.find(
             "PDBx:structCategory/PDBx:struct/PDBx:title", namespaces
@@ -64,7 +64,7 @@ def _parse_header_from(
     }


-def _parse_atom_from(element: Element, namespaces: Dict[str, str]):
+def _parse_atom_from(element: Element, namespaces: dict[str, str]):
     name = element.find("PDBx:label_atom_id", namespaces).text
     x = float(element.find("PDBx:Cartn_x", namespaces).text)
     y = float(element.find("PDBx:Cartn_y", namespaces).text)
@@ -83,8 +83,8 @@ def _parse_atom_from(element: Element, namespaces: Dict[str, str]):


 def _parse_residue_id_from(
-    element: Element, namespaces: Dict[str, str]
-) -> Tuple[str, int, str]:
+    element: Element, namespaces: dict[str, str]
+) -> tuple[str, int, str]:
     assert element.tag == f"{{{namespaces['PDBx']}}}atom_site"
     atom_group = element.find("PDBx:group_PDB", namespaces).text
     component_id = element.find("PDBx:label_comp_id", namespaces).text
@@ -175,7 +175,7 @@ def read_PIC(
             ak = akc[akstr] = AtomKey(akstr)
             return ak

-    def link_residues(ppr: List[Residue], pr: List[Residue]) -> None:
+    def link_residues(ppr: list[Residue], pr: list[Residue]) -> None:
         """Set next and prev links between i-1 and i-2 residues."""
         for p_r in pr:
             pric = p_r.internal_coord
@@ -196,7 +196,7 @@ def read_PIC(
         ang: str,
         l23: str,
        ric: IC_Residue,
-    ) -> Tuple:
+    ) -> tuple:
         """Create Hedron on current (sbcic) Chain.internal_coord."""
         ek = (akcache(a1), akcache(a2), akcache(a3))
         atmNdx = AtomKey.fields.atm
@@ -211,7 +211,7 @@ def read_PIC(
         ak_add(ek, ric)
         return ek

-    def default_hedron(ek: Tuple, ric: IC_Residue) -> None:
+    def default_hedron(ek: tuple, ric: IC_Residue) -> None:
         """Create Hedron based on same re_class hedra in ref database.

         Adds Hedron to current Chain.internal_coord, see ic_data for default
@@ -268,7 +268,7 @@ def read_PIC(
         if verbose:
             print(f" default for {ek}")

-    def hedra_check(dk: Tuple, ric: IC_Residue) -> None:
+    def hedra_check(dk: tuple, ric: IC_Residue) -> None:
         """Confirm both hedra present for dihedron key, use default if set."""
         if dk[0:3] not in sbcic.hedra and dk[2::-1] not in sbcic.hedra:
             if defaults:
@@ -283,7 +283,7 @@ def read_PIC(

     def process_dihedron(
         a1: str, a2: str, a3: str, a4: str, dangle: str, ric: IC_Residue
-    ) -> Set:
+    ) -> set:
         """Create Dihedron on current Chain.internal_coord."""
         ek = (
             akcache(a1),
@@ -306,7 +306,7 @@ def read_PIC(
         ak_add(ek, ric)
         return ek

-    def default_dihedron(ek: List, ric: IC_Residue) -> None:
+    def default_dihedron(ek: list, ric: IC_Residue) -> None:
         """Create Dihedron based on same residue class dihedra in ref database.

         Adds Dihedron to current Chain.internal_coord, see ic_data for default
@@ -450,7 +450,7 @@ def read_PIC(
         # This method has some internal functions

         # rnext should be set
-        def ake_recurse(akList: List) -> List:
+        def ake_recurse(akList: list) -> list:
             """Build combinatorics of AtomKey lists."""
             car = akList[0]
             if len(akList) > 1:
@@ -469,7 +469,7 @@ def read_PIC(
                 retList = [[ak] for ak in car]
             return retList

-        def ak_expand(eLst: List) -> List:
+        def ak_expand(eLst: list) -> list:
             """Expand AtomKey list with altlocs, all combinatorics."""
             retList = []
             for edron in eLst:
@@ -546,7 +546,7 @@ def read_PIC(
                 pass  # ignore missing combinatoric of altloc atoms
                 # need more here?

-    def ak_add(ek: Tuple, ric: IC_Residue) -> None:
+    def ak_add(ek: tuple, ric: IC_Residue) -> None:
         """Allocate edron key AtomKeys to current residue as appropriate.

         A hedron or dihedron may span a backbone amide bond, this routine
@@ -18,7 +18,7 @@ Reference:

 import collections
 import math
-from typing import MutableMapping
+from collections.abc import MutableMapping

 import numpy as np

@@ -6,7 +6,7 @@ See the `database website <https://alphafold.com/>`_ and the `API docs <https://
 import json
 import os
 from os import PathLike
-from typing import Iterator
+from collections.abc import Iterator
 from typing import Optional
 from typing import Union
 from urllib.request import urlopen
@@ -32,7 +32,7 @@ from Bio.PDB.Residue import Residue
 from Bio.PDB.Structure import Structure


-def structure_rebuild_test(entity, verbose: bool = False, quick: bool = False) -> Dict:
+def structure_rebuild_test(entity, verbose: bool = False, quick: bool = False) -> dict:
     """Test rebuild PDB structure from internal coordinates.

     Generates internal coordinates for entity and writes to a .pic file in
@@ -73,9 +73,9 @@ def structure_rebuild_test(entity, verbose: bool = False, quick: bool = False) -

 def report_IC(
     entity: Union[Structure, Model, Chain, Residue],
-    reportDict: Dict[str, Any] = None,
+    reportDict: dict[str, Any] = None,
     verbose: bool = False,
-) -> Dict[str, Any]:
+) -> dict[str, Any]:
     """Generate dict with counts of ic data elements for each entity level.

     reportDict entries are:
@@ -198,7 +198,7 @@ def IC_duplicate(entity) -> Structure:
     return read_PIC(sp)


-def _atmfid_d2h(atm: Atom) -> Tuple:
+def _atmfid_d2h(atm: Atom) -> tuple:
     afid = list(atm.get_full_id())
     afid4 = list(afid[4])
     afid40 = re.sub("D", "H", afid4[0], count=1)
@@ -212,7 +212,7 @@ def _cmp_atm(
     a0: Atom,
     a1: Atom,
     verbose: bool,
-    cmpdict: Dict,
+    cmpdict: dict,
     rtol: float = None,
     atol: float = None,
 ) -> None:
@@ -272,7 +272,7 @@ def _cmp_res(
     r0: Residue,
     r1: Residue,
     verbose: bool,
-    cmpdict: Dict,
+    cmpdict: dict,
     rtol: float = None,
     atol: float = None,
 ) -> None:
@@ -348,7 +348,7 @@ def compare_residues(
     quick: bool = False,
     rtol: float = None,
     atol: float = None,
-) -> Dict[str, Any]:
+) -> dict[str, Any]:
     """Compare full IDs and atom coordinates for 2 Biopython PDB entities.

     Skip DNA and HETATMs.
@@ -368,7 +368,7 @@ def compare_residues(
         Full ID match atoms, and Coordinate match atoms; report string;
         error status (bool)
     """
-    cmpdict: Dict[str, Any] = {}
+    cmpdict: dict[str, Any] = {}
     cmpdict["chains"] = []  # list of chain IDs (union over both structures)
     cmpdict["residues"] = 0  # count of not HETATM residues in longest chain
     cmpdict["rCount"] = 0  # Biopython Residues (includes HETATMs, waters)
@@ -303,14 +303,14 @@ if TYPE_CHECKING:

 # from Bio.PDB.Chain import Chain

-HKT = Tuple["AtomKey", "AtomKey", "AtomKey"]  # Hedron key tuple
-DKT = Tuple["AtomKey", "AtomKey", "AtomKey", "AtomKey"]  # Dihedron Key Tuple
+HKT = tuple["AtomKey", "AtomKey", "AtomKey"]  # Hedron key tuple
+DKT = tuple["AtomKey", "AtomKey", "AtomKey", "AtomKey"]  # Dihedron Key Tuple
 EKT = Union[HKT, DKT]  # Edron Key Tuple
-BKT = Tuple["AtomKey", "AtomKey"]  # Bond Key Tuple
+BKT = tuple["AtomKey", "AtomKey"]  # Bond Key Tuple

 # HACS = Tuple[np.array, np.array, np.array]  # Hedron Atom Coord Set
 HACS = np.array  # Hedron Atom Coord Set
-DACS = Tuple[np.array, np.array, np.array, np.array]  # Dihedron Atom Coord Set
+DACS = tuple[np.array, np.array, np.array, np.array]  # Dihedron Atom Coord Set


 class IC_Chain:
@@ -488,7 +488,7 @@ class IC_Chain:
         """
         # type hinting parent as Chain leads to import cycle
         self.chain = parent
-        self.ordered_aa_ic_list: List[IC_Residue] = []
+        self.ordered_aa_ic_list: list[IC_Residue] = []
         # self.initNCaC: Dict[Tuple[str], Dict["AtomKey", np.array]] = {}
         self.initNCaCs = []

@@ -502,9 +502,9 @@ class IC_Chain:
         # cache of AtomKey results for cak()
         # self.akc: Dict[Tuple(IC_Residue, str), AtomKey] = {}

-        self.atomArrayIndex: Dict["AtomKey", int] = {}
+        self.atomArrayIndex: dict["AtomKey", int] = {}

-        self.bpAtomArray: List["Atom"] = []  # rtm
+        self.bpAtomArray: list["Atom"] = []  # rtm

         self._set_residues(verbose)  # no effect if no residues loaded

@@ -657,8 +657,8 @@ class IC_Chain:

         # drop through for else Natom or pCatom is disordered:

-        Nlist: List[Atom] = []
-        pClist: List[Atom] = []
+        Nlist: list[Atom] = []
+        pClist: list[Atom] = []
         if Natom.is_disordered():
             Nlist.extend(Natom.child_dict.values())
         else:
@@ -684,8 +684,8 @@ class IC_Chain:
     def _add_residue(
         self,
         res: "Residue",
-        last_res: List,
-        last_ord_res: List,
+        last_res: list,
+        last_ord_res: list,
         verbose: bool = False,
     ) -> bool:
         """Set rprev, rnext, manage chain break.
@@ -743,15 +743,15 @@ class IC_Chain:
         self.akset : set of :class:`.AtomKey` s in this chain
         """
         # ndx = 0
-        last_res: List["IC_Residue"] = []
-        last_ord_res: List["IC_Residue"] = []
+        last_res: list["IC_Residue"] = []
+        last_ord_res: list["IC_Residue"] = []

         # atomCoordDict = {}
         akset = set()
         for res in self.chain.get_residues():
             # select only not hetero or accepted hetero
             if res.id[0] == " " or res.id[0] in IC_Residue.accept_resnames:
-                this_res: List["IC_Residue"] = []
+                this_res: list["IC_Residue"] = []
                 if 2 == res.is_disordered() and not IC_Residue.no_altloc:
                     # print('disordered res:', res.is_disordered(), res)
                     for r in res.child_dict.values():
@@ -1003,11 +1003,11 @@ class IC_Chain:

     def _hedraDict2chain(
         self,
-        hl12: Dict[str, float],
-        ha: Dict[str, float],
-        hl23: Dict[str, float],
-        da: Dict[str, float],
-        bfacs: Dict[str, float],
+        hl12: dict[str, float],
+        ha: dict[str, float],
+        hl23: dict[str, float],
+        da: dict[str, float],
+        bfacs: dict[str, float],
     ) -> None:
         """Generate chain numpy arrays from :func:`.read_PIC` dicts.

@@ -1734,7 +1734,7 @@ class IC_Chain:

     @staticmethod
     def _writeSCAD_dihed(
-        fp: TextIO, d: "Dihedron", hedraNdx: Dict, hedraSet: Set[EKT]
+        fp: TextIO, d: "Dihedron", hedraNdx: dict, hedraSet: set[EKT]
     ) -> None:
         fp.write(
             f"[ {d.angle:9.5f}, {hedraNdx[d.h1key]}, {hedraNdx[d.h2key]}, {1 if d.reverse else 0}, "
@@ -1775,9 +1775,9 @@ class IC_Chain:
         for k, h in ric.hedra.items():
             hedra[k] = h

-        atomSet: Set[AtomKey] = set()
-        bondDict: Dict = {}  # set()
-        hedraSet: Set[EKT] = set()
+        atomSet: set[AtomKey] = set()
+        bondDict: dict = {}  # set()
+        hedraSet: set[EKT] = set()
         ndx = 0
         hedraNdx = {}

@@ -2574,21 +2574,21 @@ class IC_Residue:
         self.residue = parent
         self.cic: IC_Chain
         # dict of hedron objects indexed by hedron keys
-        self.hedra: Dict[HKT, Hedron] = {}
+        self.hedra: dict[HKT, Hedron] = {}
         # dict of dihedron objects indexed by dihedron keys
-        self.dihedra: Dict[DKT, Dihedron] = {}
+        self.dihedra: dict[DKT, Dihedron] = {}
         # cache of AtomKey results for rak()
-        self.akc: Dict[Union[str, Atom], AtomKey] = {}
+        self.akc: dict[Union[str, Atom], AtomKey] = {}
         # set of AtomKeys involved in dihedra, used by split_akl,
         # build_rak_cache. Built by __init__ for XYZ (PDB coord) input,
         # _link_dihedra for PIC input
-        self.ak_set: Set[AtomKey] = set()
+        self.ak_set: set[AtomKey] = set()
         # reference to adjacent residues in chain
-        self.rprev: List[IC_Residue] = []
-        self.rnext: List[IC_Residue] = []
+        self.rprev: list[IC_Residue] = []
+        self.rnext: list[IC_Residue] = []
         # bfactors copied from PDB file
-        self.bfactors: Dict[str, float] = {}
-        self.alt_ids: Union[List[str], None] = None if IC_Residue.no_altloc else []
+        self.bfactors: dict[str, float] = {}
+        self.alt_ids: Union[list[str], None] = None if IC_Residue.no_altloc else []
         self.is20AA = True
         self.isAccept = True
         # self.NCaCKey Set by _link_dihedra()
@@ -2751,7 +2751,7 @@ class IC_Residue:
             elif h.e_class == "CACO":
                 h.hbond_2 = True

-    def _default_startpos(self) -> Dict["AtomKey", np.array]:
+    def _default_startpos(self) -> dict["AtomKey", np.array]:
         """Generate default N-Ca-C coordinates to build this residue from."""
         atomCoords = {}
         cic = self.cic
@@ -2768,7 +2768,7 @@ class IC_Residue:
             # cic.atomArrayValid[cic.atomArrayIndex[a]] = True
         return atomCoords

-    def _get_startpos(self) -> Dict["AtomKey", np.array]:
+    def _get_startpos(self) -> dict["AtomKey", np.array]:
         """Find N-Ca-C coordinates to build this residue from."""
         # only used by assemble()
         startPos = {}
@@ -2794,7 +2794,7 @@ class IC_Residue:
         self,
         resetLocation: bool = False,
         verbose: bool = False,
-    ) -> Union[Dict["AtomKey", np.array], Dict[HKT, np.array], None]:
+    ) -> Union[dict["AtomKey", np.array], dict[HKT, np.array], None]:
         """Compute atom coordinates for this residue from internal coordinates.

         This is the IC_Residue part of the :meth:`.assemble_residues_ser` serial
@@ -2998,9 +2998,9 @@ class IC_Residue:

     def split_akl(
         self,
-        lst: Union[Tuple["AtomKey", ...], List["AtomKey"]],
+        lst: Union[tuple["AtomKey", ...], list["AtomKey"]],
         missingOK: bool = False,
-    ) -> List[Tuple["AtomKey", ...]]:
+    ) -> list[tuple["AtomKey", ...]]:
         """Get AtomKeys for this residue (ak_set) for generic list of AtomKeys.

         Changes and/or expands a list of 'generic' AtomKeys (e.g. 'N, C, C') to
@@ -3029,9 +3029,9 @@ class IC_Residue:
         # given a list of AtomKeys
         # form a new list of same atomkeys with coords or diheds in this residue
         # plus lists of matching altloc atomkeys in coords or diheds
-        edraLst: List[Tuple[AtomKey, ...]] = []
+        edraLst: list[tuple[AtomKey, ...]] = []
         altlocs = set()
-        posnAltlocs: Dict["AtomKey", Set[str]] = {}
+        posnAltlocs: dict["AtomKey", set[str]] = {}
         akMap = {}
         for ak in lst:
             posnAltlocs[ak] = set()
@@ -3100,7 +3100,7 @@ class IC_Residue:
         # print(new_edraLst)
         return new_edraLst

-    def _gen_edra(self, lst: Union[Tuple["AtomKey", ...], List["AtomKey"]]) -> None:
+    def _gen_edra(self, lst: Union[tuple["AtomKey", ...], list["AtomKey"]]) -> None:
         """Populate hedra/dihedra given edron ID tuple.

         Given list of AtomKeys defining hedron or dihedron
@@ -3120,7 +3120,7 @@ class IC_Residue:
         else:
             cdct, dct, obj = self.cic.dihedra, self.dihedra, Dihedron  # type: ignore # noqa

-        if isinstance(lst, List):
+        if isinstance(lst, list):
             tlst = tuple(lst)
         else:
             tlst = lst
@@ -3425,7 +3425,7 @@ class IC_Residue:
     picFlagsDict = pic_flags._asdict()
     """Dictionary of pic_flags values to use as needed."""

-    def _write_pic_bfac(self, atm: Atom, s: str, col: int) -> Tuple[str, int]:
+    def _write_pic_bfac(self, atm: Atom, s: str, col: int) -> tuple[str, int]:
         ak = self.rak(atm)
         if 0 == col % 5:
             s += "BFAC:"
@@ -3550,7 +3550,7 @@ class IC_Residue:

         return s

-    def _get_ak_tuple(self, ak_str: str) -> Optional[Tuple["AtomKey", ...]]:
+    def _get_ak_tuple(self, ak_str: str) -> Optional[tuple["AtomKey", ...]]:
         """Convert atom pair string to AtomKey tuple.

         :param str ak_str:
@@ -3799,7 +3799,7 @@ class IC_Residue:

     def pick_length(
         self, ak_spec: Union[str, BKT]
-    ) -> Tuple[Optional[List["Hedron"]], Optional[BKT]]:
+    ) -> tuple[Optional[list["Hedron"]], Optional[BKT]]:
         """Get list of hedra containing specified atom pair.

         :param ak_spec:
@@ -3829,7 +3829,7 @@ class IC_Residue:
         :return: list of hedra containing specified atom pair as tuples of
             AtomKeys
         """
-        rlst: List[Hedron] = []
+        rlst: list[Hedron] = []
         # if ":" in ak_spec:
         if isinstance(ak_spec, str):
             ak_spec = cast(BKT, self._get_ak_tuple(ak_spec))
@@ -3949,7 +3949,7 @@ class Edron:
    Dihedron objects"""

     @staticmethod
-    def gen_key(lst: List["AtomKey"]) -> str:
+    def gen_key(lst: list["AtomKey"]) -> str:
         """Generate string of ':'-joined AtomKey strings from input.

         Generate '2_A_C:3_P_N:3_P_CA' from (2_A_C, 3_P_N, 3_P_CA)
@@ -3961,7 +3961,7 @@ class Edron:
             return f"{lst[0].id}:{lst[1].id}:{lst[2].id}"

     @staticmethod
-    def gen_tuple(akstr: str) -> Tuple:
+    def gen_tuple(akstr: str) -> tuple:
         """Generate AtomKey tuple for ':'-joined AtomKey string.

         Generate (2_A_C, 3_P_N, 3_P_CA) from '2_A_C:3_P_N:3_P_CA'
@@ -3970,7 +3970,7 @@ class Edron:
         return tuple([AtomKey(i) for i in akstr.split(":")])

     # @profile
-    def __init__(self, *args: Union[List["AtomKey"], EKT], **kwargs: str) -> None:
+    def __init__(self, *args: Union[list["AtomKey"], EKT], **kwargs: str) -> None:
         """Initialize Edron with sequence of AtomKeys.

         Acceptable input:
@@ -3979,7 +3979,7 @@ class Edron:
             AtomKey, ... : sequence of AtomKeys as args
             {'a1': str, 'a2': str, ... } : dict of AtomKeys as 'a1', 'a2' ...
         """
-        atomkeys: List[AtomKey] = []
+        atomkeys: list[AtomKey] = []
         for arg in args:
             if isinstance(arg, list):
                 atomkeys = arg
@@ -4057,7 +4057,7 @@ class Edron:
         """Hash calculated at init from atomkeys tuple."""
         return self._hash

-    def _cmp(self, other: "Edron") -> Union[Tuple["AtomKey", "AtomKey"], bool]:
+    def _cmp(self, other: "Edron") -> Union[tuple["AtomKey", "AtomKey"], bool]:
         """Comparison function ranking self vs. other; False on equal.

         Priority is lowest value for sort: psi < chi1.
@@ -4085,7 +4085,7 @@ class Edron:
             return NotImplemented
         rslt = self._cmp(other)
         if rslt:
-            rslt = cast(Tuple[AtomKey, AtomKey], rslt)
+            rslt = cast(tuple[AtomKey, AtomKey], rslt)
             return rslt[0] > rslt[1]
         return False

@@ -4095,7 +4095,7 @@ class Edron:
             return NotImplemented
         rslt = self._cmp(other)
         if rslt:
-            rslt = cast(Tuple[AtomKey, AtomKey], rslt)
+            rslt = cast(tuple[AtomKey, AtomKey], rslt)
             return rslt[0] >= rslt[1]
         return True

@@ -4105,7 +4105,7 @@ class Edron:
             return NotImplemented
         rslt = self._cmp(other)
         if rslt:
-            rslt = cast(Tuple[AtomKey, AtomKey], rslt)
+            rslt = cast(tuple[AtomKey, AtomKey], rslt)
             return rslt[0] < rslt[1]
         return False

@@ -4115,7 +4115,7 @@ class Edron:
             return NotImplemented
         rslt = self._cmp(other)
         if rslt:
-            rslt = cast(Tuple[AtomKey, AtomKey], rslt)
+            rslt = cast(tuple[AtomKey, AtomKey], rslt)
             return rslt[0] <= rslt[1]
         return True

@@ -4151,7 +4151,7 @@ class Hedron(Edron):
    setters for relevant attributes (angle in degrees)
    """

-    def __init__(self, *args: Union[List["AtomKey"], HKT], **kwargs: str) -> None:
+    def __init__(self, *args: Union[list["AtomKey"], HKT], **kwargs: str) -> None:
         """Initialize Hedron with sequence of AtomKeys, kwargs.

         Acceptable input:
@@ -4302,7 +4302,7 @@ class Dihedron(Edron):
        return :data:`IC_Residue.pic_flags` bitmask for dihedron psi, omega, etc
    """

-    def __init__(self, *args: Union[List["AtomKey"], DKT], **kwargs: str) -> None:
+    def __init__(self, *args: Union[list["AtomKey"], DKT], **kwargs: str) -> None:
         """Init Dihedron with sequence of AtomKeys and optional dihedral angle.

         Acceptable input:
@@ -4372,7 +4372,7 @@ class Dihedron(Edron):
         else:
             self.primary = False

-    def _set_hedra(self) -> Tuple[bool, Hedron, Hedron]:
+    def _set_hedra(self) -> tuple[bool, Hedron, Hedron]:
         """Work out hedra keys and set rev flag."""
         try:
             return self.rev, self.hedron1, self.hedron2
@@ -4470,7 +4470,7 @@ class Dihedron(Edron):
             return 180.0 - ((180.0 - a2) + a1) % 360.0

     @staticmethod
-    def angle_avg(alst: List, in_rads: bool = False, out_rads: bool = False):
+    def angle_avg(alst: list, in_rads: bool = False, out_rads: bool = False):
         """Get average of list of +/-180 angles.

         :param List alst: list of angles to average
@@ -4482,7 +4482,7 @@ class Dihedron(Edron):
         return ravg if out_rads else np.rad2deg(ravg)

     @staticmethod
-    def angle_pop_sd(alst: List, avg: float):
+    def angle_pop_sd(alst: list, avg: float):
         """Get population standard deviation for list of +/-180 angles.

         should be sample std dev but avoid len(alst)=1 -> div by 0
@@ -4619,7 +4619,7 @@ class AtomKey:
     """Set True to convert D Deuterium to H Hydrogen on input."""

     def __init__(
-        self, *args: Union[IC_Residue, Atom, List, Dict, str], **kwargs: str
+        self, *args: Union[IC_Residue, Atom, list, dict, str], **kwargs: str
     ) -> None:
         """Initialize AtomKey with residue and atom data.

@@ -4633,7 +4633,7 @@ class AtomKey:
             (respos: 52, icode: None, atm: 'CA', ...) : kwargs with fieldNames
             52_G_CA, 52B_G_CA, 52_G_CA_0.33, 52_G_CA_B_0.33 : id strings
         """
-        akl: List[Optional[str]] = []
+        akl: list[Optional[str]] = []
         self.ric = None

         for arg in args:
@@ -4797,7 +4797,7 @@ class AtomKey:
         return "Hsb" if akl[atmNdx][0] == "H" else None

     # @profile
-    def _cmp(self, other: "AtomKey") -> Tuple[int, int]:
+    def _cmp(self, other: "AtomKey") -> tuple[int, int]:
         """Comparison function ranking self vs. other.

         Priority is lower value, i.e. (CA, CB) gives (0, 1) for sorting.
@@ -503,7 +503,7 @@ def _get_azimuth(x: float, y: float) -> float:
     )


-def get_spherical_coordinates(xyz: np.ndarray) -> Tuple[float, float, float]:
+def get_spherical_coordinates(xyz: np.ndarray) -> tuple[float, float, float]:
     """Compute spherical coordinates (r, azimuth, polar_angle) for X,Y,Z point.

     :param array xyz: column vector (3 row x 1 column NumPy array)
@@ -526,7 +526,7 @@ gmrz2 = np.identity(4, dtype=np.float64)

 def coord_space(
     a0: np.ndarray, a1: np.ndarray, a2: np.ndarray, rev: bool = False
-) -> Tuple[np.ndarray, Optional[np.ndarray]]:
+) -> tuple[np.ndarray, Optional[np.ndarray]]:
     """Generate transformation matrix to coordinate space defined by 3 points.

     New coordinate space will have:
@@ -2133,7 +2133,7 @@ class Seq(_SeqAbstractBaseClass):
             current = 0  # not needed here, but it keeps mypy happy
             end = -1
             starts = sorted(data.keys())
-            _data: Dict[int, bytes] = {}
+            _data: dict[int, bytes] = {}
             for start in starts:
                 seq = data[start]
                 if isinstance(seq, str):
@@ -325,7 +325,7 @@ _HEADFMT = ">H4sI2H3I"
 # directory data structure
 _DIRFMT = ">4sI2H4I"

-__global_tag_listing: List[str] = []
+__global_tag_listing: list[str] = []
 for tag in _INSTRUMENT_SPECIFIC_TAGS.values():
     __global_tag_listing += tag.keys()

@@ -16,7 +16,7 @@ from os import PathLike
 from typing import AnyStr
 from typing import Generic
 from typing import IO
-from typing import Iterator
+from collections.abc import Iterator
 from typing import Optional
 from typing import Union

@@ -52,7 +52,7 @@ Or,
 Note these examples only show the first 50 bases to keep the output short.
 """

-from typing import Iterator
+from collections.abc import Iterator

 from Bio.SeqRecord import SeqRecord
 from Bio.Sequencing import Phd
@@ -364,11 +364,11 @@ from math import log
 from typing import Any
 from typing import Callable
 from typing import IO
-from typing import Iterator
+from collections.abc import Iterator
 from typing import List
-from typing import Mapping
+from collections.abc import Mapping
 from typing import Optional
-from typing import Sequence
+from collections.abc import Sequence
 from typing import Tuple
 from typing import Union

@@ -533,7 +533,7 @@ def phred_quality_from_solexa(solexa_quality: float) -> float:
     return 10 * log(10 ** (solexa_quality / 10.0) + 1, 10)


-def _get_phred_quality(record: SeqRecord) -> Union[List[float], List[int]]:
+def _get_phred_quality(record: SeqRecord) -> Union[list[float], list[int]]:
     """Extract PHRED qualities from a SeqRecord's letter_annotations (PRIVATE).

     If there are no PHRED qualities, but there are Solexa qualities, those are
@@ -831,7 +831,7 @@ def _get_solexa_quality_str(record: SeqRecord) -> str:


 # TODO - Default to nucleotide or even DNA?
-def FastqGeneralIterator(source: _TextIOSource) -> Iterator[Tuple[str, str, str]]:
+def FastqGeneralIterator(source: _TextIOSource) -> Iterator[tuple[str, str, str]]:
     """Iterate over Fastq records as string tuples (not as SeqRecord objects).

     Arguments:
@@ -1421,7 +1421,7 @@ class QualPhredIterator(SequenceIterator):
         id = descr.split()[0]
         name = id

-        qualities: List[int] = []
+        qualities: list[int] = []
         for line in handle:
             if line[0] == ">":
                 break
@@ -378,7 +378,7 @@ making up each alignment as SeqRecords.

 from typing import Callable
 from typing import Dict
-from typing import Iterable
+from collections.abc import Iterable
 from typing import Union

 from Bio.File import as_handle
@@ -454,7 +454,7 @@ _FormatToIterator = {
     "xdna": XdnaIO.XdnaIterator,
 }

-_FormatToString: Dict[str, Callable[[SeqRecord], str]] = {
+_FormatToString: dict[str, Callable[[SeqRecord], str]] = {
     "fasta": FastaIO.as_fasta,
     "fasta-2line": FastaIO.as_fasta_2line,
     "tab": TabIO.as_tab,
@@ -18,13 +18,13 @@ from io import StringIO
 from typing import Any
 from typing import cast
 from typing import Dict
-from typing import Iterable
+from collections.abc import Iterable
 from typing import List
-from typing import Mapping
+from collections.abc import Mapping
 from typing import NoReturn
 from typing import Optional
 from typing import overload
-from typing import Sequence
+from collections.abc import Sequence
 from typing import TYPE_CHECKING
 from typing import Union

@@ -40,7 +40,7 @@ if TYPE_CHECKING:
 _NO_SEQRECORD_COMPARISON = "SeqRecord comparison is deliberately not implemented. Explicitly compare the attributes of interest."


-class _RestrictedDict(Dict[str, Sequence[Any]]):
+class _RestrictedDict(dict[str, Sequence[Any]]):
     """Dict which only allows sequences of given length as values (PRIVATE).

     This simple subclass of the Python dictionary is used in the SeqRecord
@@ -177,10 +177,10 @@ class SeqRecord:
     """

     _AnnotationsDictValue = Union[str, int]
-    _AnnotationsDict = Dict[str, _AnnotationsDictValue]
+    _AnnotationsDict = dict[str, _AnnotationsDictValue]

     annotations: _AnnotationsDict
-    dbxrefs: List[str]
+    dbxrefs: list[str]

     def __init__(
         self,
@@ -188,10 +188,10 @@ class SeqRecord:
         id: Optional[str] = "<unknown id>",
         name: str = "<unknown name>",
         description: str = "<unknown description>",
-        dbxrefs: Optional[List[str]] = None,
-        features: Optional[List["SeqFeature"]] = None,
+        dbxrefs: Optional[list[str]] = None,
+        features: Optional[list["SeqFeature"]] = None,
         annotations: Optional[_AnnotationsDict] = None,
-        letter_annotations: Optional[Dict[str, Sequence[Any]]] = None,
+        letter_annotations: Optional[dict[str, Sequence[Any]]] = None,
     ) -> None:
         """Create a SeqRecord.

@@ -689,7 +689,7 @@ class SeqRecord:

         Note that long sequences are shown truncated.
         """
-        lines: List[str] = []
+        lines: list[str] = []
         if self.id:
             lines.append(f"ID: {self.id}")
         if self.name:
@@ -46,9 +46,9 @@ _BASE_URL = "http://togows.dbcls.jp"
 # Caches:
 _search_db_names = None
 _entry_db_names = None
-_entry_db_fields: Dict[str, str] = {}
-_entry_db_formats: Dict[str, str] = {}
-_convert_formats: List[str] = []
+_entry_db_fields: dict[str, str] = {}
+_entry_db_formats: dict[str, str] = {}
+_convert_formats: list[str] = []


 def _get_fields(url):
@@ -38,7 +38,7 @@ def _get_next_link(response: HTTPResponse) -> Optional[str]:
     return None


-def _get_results(response: HTTPResponse) -> List[dict]:
+def _get_results(response: HTTPResponse) -> list[dict]:
     return json.loads(response.read().decode())["results"]


@@ -66,7 +66,7 @@ class _UniProtSearchResults:

     def __init__(self, first_url: str):
         self.next_url = first_url
-        self.results_cache: List[dict] = []
+        self.results_cache: list[dict] = []
         self.next_result_index = 0
         response = self._fetch_next_batch()
         self.search_result_count = _get_search_result_count(response)
@@ -116,7 +116,7 @@ class _UniProtSearchResults:


 def search(
-    query: str, fields: Optional[List[str]] = None, batch_size: int = 500
+    query: str, fields: Optional[list[str]] = None, batch_size: int = 500
 ) -> _UniProtSearchResults:
     """Search the UniProt database.

@@ -18,7 +18,6 @@ Note: Currently we do not support recording per-letter-annotations
 (like quality scores) in BioSQL.
 """

-from typing import List
 from typing import Optional

 from Bio import SeqFeature
@@ -543,14 +542,14 @@ class DBSeqRecord(SeqRecord):
     seq = property(__get_seq, __set_seq, __del_seq, "Seq object")

     @property
-    def dbxrefs(self) -> List[str]:
+    def dbxrefs(self) -> list[str]:
         """Database cross references."""
         if not hasattr(self, "_dbxrefs"):
             self._dbxrefs = _retrieve_dbxrefs(self._adaptor, self._primary_id)
         return self._dbxrefs

     @dbxrefs.setter
-    def dbxrefs(self, value: List[str]) -> None:
+    def dbxrefs(self, value: list[str]) -> None:
         self._dbxrefs = value

     @dbxrefs.deleter
@@ -13,10 +13,8 @@
 """Helper code for Biopython's BioSQL code (for internal use)."""

 import os
-from typing import Dict
-from typing import Type

-_dbutils: Dict[str, Type["Generic_dbutils"]] = {}
+_dbutils: dict[str, type["Generic_dbutils"]] = {}


 class Generic_dbutils: