Commit 95dfe2c2 authored by Peter Schubert's avatar Peter Schubert

Code improvements

parent e48b2d7e
@@ -33,13 +33,24 @@ reduction process. Essential reactions are identified earlier.
Peter Schubert, Computational Cell Biology, HHU Duesseldorf, November 2021
## Small Python examples
## Install package
```shell
$ pip3 install networkred@git+https://gitlab.cs.uni-duesseldorf.de/schubert/networkred.git
```
## Small Python examples
```python
>>> import networkred
>>> .... (yet to be filled)
```
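A minimal end-to-end sketch, distilled from the bundled sample script (the file names and protected ids below are placeholders):

```python
import networkred

# load the SBML model and configure the reduction
red_model = networkred.ReduceModel('sample_data/SBML_models/my_model.xml')
red_model.set_reduction_params(protect_rids=['R_bio'], protect_mids=['M_glc__D_c'],
                               model_base='original')
# prune the network and export the reduced model
red_model.reduce()
red_model.write_sbml('sample_data/SBML_models/my_model_reduced.xml')
```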
## License
[GPLv3](LICENSE.txt)
......
# This is a sample Python script for network reduction of an SBML fbc model.
import sys
import os
import logging
import pandas as pd
import networkred
logfile = './log/nr.log'
logformat = '%(asctime)s %(levelname)s:%(name)s %(message)s'
logdir = os.path.dirname(logfile)
if not os.path.exists(logdir):
os.mkdir(logdir)
logging.basicConfig(filename=logfile, filemode='w', level=logging.INFO, format=logformat)
logging.info('Started')
print('Python Version:', sys.version_info)
print('Current working directory:', os.getcwd())
print('networkred version:', networkred.__version__)
# file names and directories
#model_name = 'iJO1366_CS'
#protected_parts_file = 'iJO1366_CS_nr_protect.xlsx' # in data_dir
#model_name = 'iML1515_CS'
#protected_parts_file = 'iML1515_CS_pf1.xlsx' # in data_dir
model_name = 'Deniz_model_fba'
protected_parts_file = 'Deniz_model_fba_pf.xlsx' # in data_dir
sbml_dir = 'sample_data/SBML_models'
data_dir = 'sample_data/data'
# load the model
sbml_file = os.path.join(sbml_dir, model_name) + '.xml'
red_model = networkred.ReduceModel(sbml_file)
red_status = red_model.get_reduction_status()
print('Reduction status ' + ', '.join([k + ': ' + str(v) for k, v in red_status.items()]))
# set protected parts
protect_parts = {'reactions': set(), 'metabolites': set(),
'functions': pd.DataFrame(), 'bounds': pd.DataFrame()}
protect_name = os.path.join(data_dir, protected_parts_file)
print('Reading protected parts from ' + protect_name)
protect_rids = None
protect_mids = None
protect_funcs = None
temp_fbc_bounds = None
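# the protected-parts workbook may provide up to four optional sheets
# ('reactions', 'metabolites', 'functions', 'bounds'); ids are read from
# the first (index) column of each sheet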
with pd.ExcelFile(protect_name) as xlsx:
if 'reactions' in xlsx.sheet_names:
protect_rids = pd.read_excel(xlsx, sheet_name='reactions', index_col=0).index.tolist()
if 'metabolites' in xlsx.sheet_names:
protect_mids = pd.read_excel(xlsx, sheet_name='metabolites', index_col=0).index.tolist()
if 'functions' in xlsx.sheet_names:
protect_funcs = pd.read_excel(xlsx, sheet_name='functions', index_col=0)
if 'bounds' in xlsx.sheet_names:
temp_fbc_bounds = pd.read_excel(xlsx, sheet_name='bounds', index_col=0)
red_model.set_protection(protect_rids=protect_rids,
protect_mids=protect_mids,
protect_funcs=protect_funcs,
temp_fbc_bounds=temp_fbc_bounds,
reset=True)
# check feasibility:
print('feasibility check:', red_model.check_feasibility(feas=True))
# remove blocked reactions:
red_model.remove_blocked()
print('blocked reaction removal')
print('feasibility check:', red_model.check_feasibility(feas=True))
red_status = red_model.get_reduction_status()
print('Reduction status ' + ', '.join([k + ': ' + str(v) for k, v in red_status.items()]))
# remove isoenzymes
red_model.remove_parallel_rids()
print('parallel reactions removal')
print('feasibility check:', red_model.check_feasibility(feas=True))
red_status = red_model.get_reduction_status()
print('Reduction status ' + ', '.join([k + ': ' + str(v) for k, v in red_status.items()]))
# prune network (for specific dof, rmin)
print('---- network pruning -----')
red_model.reduce()
# write reduced model to sbml
reduced_name = os.path.join(sbml_dir, model_name) + '_reduced.xml'
red_model.write_sbml(reduced_name)
print('reduced model converted to SBML:', reduced_name)
logging.info('Finished')
@@ -47,6 +47,7 @@ import numpy as np
import pandas as pd
from sympy import Add
import logging
import pickle
import sbmlxdf
import cobra
@@ -99,6 +100,18 @@ def extract_params(record):
return params
def get_snapshot_names(model_ref):
"""Generates pathnames for model snapshots.
:param model_ref: filename of SBML (.xml) model
:type model_ref: str
:returns: pathnames for model snapshots
:rtype: tuple of two str
"""
return (re.sub('.xml$', '__red_restore.json', model_ref),
re.sub('.xml$', '__red_restore.pkl', model_ref))
class ReduceModel:
_orig_model_ref = None
@@ -116,16 +129,16 @@ class ReduceModel:
if type(model_ref) == str:
if os.path.exists(model_ref):
self._orig_model = cobra.io.read_sbml_model(model_ref)
print(f'SBML model imported from {model_ref}')
print(f'SBML model imported from: {model_ref}')
elif type(model_ref) == cobra.Model:
self._orig_model = model_ref.copy()
else:
raise AttributeError('model_ref must be an SBML file name or a cobra.Model')
self._orig_model_ref = model_ref
self._red_model = self._orig_model.copy()
self._red_model.id = self._orig_model.id + '_reduced'
self._red_model.name = self._orig_model.name + '_reduced'
# self._red_model = self._orig_model.copy()
# self._red_model.id = self._orig_model.id + '_reduced'
# self._red_model.name = self._orig_model.name + '_reduced'
self._essential_rids = []
self._protect_rids = None
self._protect_mids = None
@@ -136,7 +149,7 @@ class ReduceModel:
self._eps = 1e-10
# for Python 3.8 and higher, multiprocessing fails with cpus > 1
self._cpus = 6 if sys.version_info.minor == 7 else 1
self._red_model.solver.configuration.timeout = 10
# self._red_model.solver.configuration.timeout = 10
def _set_temp_fbc_bounds(self, temp_fbc_bounds):
"""Set temporary Flux Balance lower/upper bounds in Cobra Model.
@@ -181,12 +194,14 @@ class ReduceModel:
logging.info('original fbc bounds restored')
self._orig_fbc_bounds = {}
def set_protection(self, protect_rids=None, protect_mids=None,
protect_funcs=None, temp_fbc_bounds=None, reset=True):
def set_reduction_params(self, protect_rids=None, protect_mids=None,
protect_funcs=None, temp_fbc_bounds=None, model_base='original'):
"""Set protected parts of the model.
reaction ids and metabolite ids get converted to cobra ids (i.e. stripping of 'R_' and 'M_').
Check is performed wheather IDs exist in the model.
A check is performed whether the given ids exist in the model.
It is also possible to restart from a snapshot, i.e. to continue a previous
reduction process that stalled.
:param protect_rids: reaction ids to be protected during reduction
:type protect_rids: list or set of str
@@ -195,76 +210,87 @@ class ReduceModel:
:param protect_funcs: functions/phenotypes to be preserved during reduction
:type protect_funcs: pandas DataFrame or None
:param temp_fbc_bounds: temporary fbc bounds required for protected parts
:type temp_fbc_bounds: pandas DataFrame or None
:param reset: reset to original model (True) or continue with reduced model (False)
:type reset: bool (default: True)
:returns: status information in a dictionary
:rtype: dict
:type temp_fbc_bounds: None or DataFrame with columns ['fbcLb', 'fbcUb']
:param model_base: identifies where to restart reduction from ('original', 'reduced', 'snapshot')
:type model_base: str (default: 'original')
"""
if reset:
json_snapshot, pkl_snapshot = get_snapshot_names(self._orig_model_ref)
if (model_base == 'snapshot') and os.path.exists(json_snapshot) and os.path.exists(pkl_snapshot):
self._red_model = cobra.io.load_json_model(json_snapshot)
with open(pkl_snapshot, 'rb') as f:
self._essential_rids = pickle.load(f)
print('snapshot restored at %4d remaining reactions from: %s' %
(len(self._red_model.reactions), re.sub('json$', '', json_snapshot)))
logging.info('snapshot restored at %4d remaining reactions from: %s',
len(self._red_model.reactions), re.sub('json$', '', json_snapshot))
elif (model_base == 'original') or (self._red_model is None):
# start reduction process from base model
self._red_model = self._orig_model.copy()
self._red_model.id = self._orig_model.id + '_reduced'
self._red_model.name = self._orig_model.name + '_reduced'
self._essential_rids = []
self._essential_rids = set()
# check if reaction ids / metabolites ids used actually exist in the model
model_mids = set(m.id for m in self._orig_model.metabolites)
model_rids = set(r.id for r in self._orig_model.reactions)
# strip prefixes from reaction/metabolite ids in compliance with COBRApy
if protect_rids is not None:
protect_rids = {re.sub('^R_', '', rid) for rid in protect_rids}
if protect_mids is not None:
protect_mids = {re.sub('^M_', '', mid) for mid in protect_mids}
if temp_fbc_bounds is not None:
temp_fbc_bounds.index = temp_fbc_bounds.index.str.replace('^R_', '', regex=True)
if protect_funcs is not None:
protect_funcs['expression'] = protect_funcs['expression'].str.replace('rid=R_', 'rid=', regex=True)
# check if ids exist in the model
model_mids = set(m.id for m in self._red_model.metabolites)
model_rids = set(r.id for r in self._red_model.reactions)
if protect_rids is not None:
wrong_ids = protect_rids.difference(model_rids)
if len(wrong_ids) > 0:
logging.warning('protected reaction ids not in model %s', ', '.join(wrong_ids))
if protect_mids is not None:
protect_mids = {re.sub('^M_', '', rid) for rid in protect_mids}
wrong_ids = protect_mids.difference(model_mids)
if len(wrong_ids) > 0:
logging.warning('protected metabolite ids not in model %s', ', '.join(wrong_ids))
if temp_fbc_bounds is not None:
required_cols = ['fbcLb', 'fbcUb']
# check required DataFrame columns exist
try:
temp_fbc_bounds.iloc[0][required_cols]
except KeyError:
print('temp_fbc_bounds requires DataFrame columns %s', ', '.join(required_cols))
logging.warning('temp_fbc_bounds requires DataFrame columns %s', ', '.join(required_cols))
return False
cobra_rids = temp_fbc_bounds.index.str.replace('^R_', '', regex=True)
temp_fbc_bounds.index = cobra_rids
wrong_ids = set(cobra_rids).difference(model_rids)
wrong_ids = set(temp_fbc_bounds.index).difference(model_rids)
if len(wrong_ids) > 0:
logging.warning('temp fbc bound reaction ids not in model %s', ', '.join(wrong_ids))
self._set_temp_fbc_bounds(temp_fbc_bounds)
required_cols = ['expression', 'lb', 'ub']
if protect_funcs is not None:
# check required DataFrame columns exist
try:
protect_funcs.iloc[0][required_cols]
except KeyError:
print('protect_funcs requires DataFrame columns %s', ', '.join(required_cols))
logging.warning('protect_funcs requires DataFrame columns %s', ', '.join(required_cols))
return False
cobra_rids = protect_funcs['expression'].str.replace('rid=R_', 'rid=', regex=True)
protect_funcs['expression'] = cobra_rids
used_rids = set()
for items in cobra_rids.values:
for items in protect_funcs['expression'].values:
for item in get_items(items):
params = extract_params(item)
used_rids.add(params['rid'])
used_rids.add(extract_params(item)['rid'])
wrong_ids = used_rids.difference(model_rids)
if len(wrong_ids) > 0:
logging.warning('protected function reaction ids not in model %s', ', '.join(wrong_ids))
# check that the required columns exist in the dataframes:
if temp_fbc_bounds is not None:
required_cols = {'fbcLb', 'fbcUb'}
missing_cols = required_cols.difference(temp_fbc_bounds.columns)
if len(missing_cols) > 0:
print('temp_fbc_bounds requires columns:', ', '.join(required_cols))
logging.warning('temp_fbc_bounds requires columns %s', ', '.join(required_cols))
if protect_funcs is not None:
required_cols = {'expression', 'lb', 'ub'}
missing_cols = required_cols.difference(protect_funcs.columns)
if len(missing_cols) > 0:
print('protect_funcs requires columns:', ', '.join(required_cols))
logging.warning('protect_funcs requires columns %s', ', '.join(required_cols))
# set protected functions
self._protect_rids = protect_rids.intersection(model_rids)
self._essential_rids = self._protect_rids
self._protect_mids = protect_mids.intersection(model_mids)
self._protect_funcs = protect_funcs
if temp_fbc_bounds is not None:
self._set_temp_fbc_bounds(temp_fbc_bounds)
if len(self._essential_rids) == 0:
self._essential_rids = self._protect_rids
logging.info('protected parts configured: %d reactions, %d metabolites, %d functions, %d fbc bounds',
len(self._protect_rids), len(self._protect_mids),
0 if self._protect_funcs is None else len(self._protect_funcs),
0 if temp_fbc_bounds is None else len(temp_fbc_bounds))
return True
return
def _add_constraints(self, k):
"""Add constraints to the problem.
@@ -346,6 +372,7 @@ class ReduceModel:
if len(blocked_rids) > 0:
print('protected reactions that are blocked:', ', '.join(blocked_rids))
logging.warning('protected reactions that are blocked: %s', ', '.join(blocked_rids))
return False
else:
logging.info('protected reactions are feasible')
@@ -368,6 +395,7 @@ class ReduceModel:
if len(blocked_mids) > 0:
print('protected metabolites that are blocked:', ', '.join(blocked_mids))
logging.warning('protected metabolites that are blocked: %s', ', '.join(blocked_mids))
return False
else:
logging.info('protected metabolites are feasible')
else:
@@ -398,9 +426,8 @@ class ReduceModel:
self._red_model = self._orig_model.copy()
self._red_model.id = self._orig_model.id + '_reduced'
self._red_model.name = self._orig_model.name + '_reduced'
self._essential_rids = []
self._essential_rids = self._protect_rids
self._orig_fbc_bounds = {}
pass
def get_reduction_status(self):
""" Return status in model reduction process
@@ -601,7 +628,17 @@ class ReduceModel:
flux_ranges['range'] = abs(flux_ranges['maximum'] - flux_ranges['minimum'])
return flux_ranges['range'].sort_values()
def reduce(self, r_min=1, dof_min=1, feas=True):
def print_status(self):
""" Print status along reduction process
"""
self.remove_blocked()
stat = self.get_reduction_status()
print('reactions removed/remaining/protected: %4d/%4d/%4d; dof: %3d' %
(stat['delta_r'], stat['remain_r'], stat['essential_r'], stat['dof']))
logging.info('reactions removed/remaining/protected: %4d/%4d/%4d [dof: %3d]',
stat['delta_r'], stat['remain_r'], stat['essential_r'], stat['dof'])
def reduce(self, r_min=1, dof_min=1, feas=True, save_pts=0):
"""Reduce the network
:param r_min: minimum number of reactions
@@ -610,14 +647,25 @@ class ReduceModel:
:type dof_min: int (default: 1)
:param feas: whether protected reactions/metabolite must be unblocked
:type feas: bool (default: True)
:param save_pts: save a snapshot every save_pts removed reactions (0: disabled)
:type save_pts: int (default: 0)
"""
print('INITIAL STATUS')
self.print_status()
# initial status
stat = self.get_reduction_status()
print('reactions removed/remaining/protected: %4d/%4d/%4d; dof: %3d' %
(stat['delta_r'], stat['remain_r'], stat['essential_r'], stat['dof']))
logging.info('reactions removed/remaining/protected: %4d/%4d/%4d [dof: %3d]',
stat['delta_r'], stat['remain_r'], stat['essential_r'], stat['dof'])
print('remove blocked reactions')
self.remove_blocked()
self.print_status()
print('remove parallel reactions')
self.remove_parallel_rids()
self.print_status()
if save_pts > 0:
next_save = max(0, len(self._red_model.reactions) - save_pts)
print('next save point at', next_save)
else:
next_save = 0
while True:
flux_ranges = self._flux_ranges_protect_funcs()
@@ -658,13 +706,10 @@ class ReduceModel:
self._remove_reactions([remove_rid])
print('removed', remove_rid)
logging.info('removed %s', remove_rid)
self.print_status()
stat = self.get_reduction_status()
print('reactions removed/remaining/protected: %4d/%4d/%4d; dof: %3d' %
(stat['delta_r'], stat['remain_r'], stat['essential_r'], stat['dof']))
logging.info('reactions removed/remaining/protected: %4d/%4d/%4d [dof: %3d]',
stat['delta_r'], stat['remain_r'], stat['essential_r'], stat['dof'])
# check some terminating conditions
stat = self.get_reduction_status()
if stat['dof'] <= dof_min:
print('STOPPED: minimum degrees of freedom reached')
logging.info('STOPPED: minimum degrees of freedom reached')
@@ -674,12 +719,23 @@ class ReduceModel:
logging.info('STOPPED: minimum number of reactions reached')
break
# final status
stat = self.get_reduction_status()
print('reactions removed/remaining/protected: %4d/%4d/%4d; dof: %3d' %
(stat['delta_r'], stat['remain_r'], stat['essential_r'], stat['dof']))
logging.info('reactions removed/remaining/protected: %4d/%4d/%4d [dof: %3d]',
stat['delta_r'], stat['remain_r'], stat['essential_r'], stat['dof'])
# save snapshots during network reduction from where to resume after a crash
if len(self._red_model.reactions) < next_save:
json_snapshot, pkl_snapshot = get_snapshot_names(self._orig_model_ref)
cobra.io.save_json_model(self._red_model, json_snapshot)
with open(pkl_snapshot, 'wb') as f:
pickle.dump(self._essential_rids, f)
logging.info('snapshot saved at %4d remaining reactions', len(self._red_model.reactions))
next_save = max(0, len(self._red_model.reactions) - save_pts)
print('FINAL STATUS:')
self.print_status()
# now remove any snapshot files
for snapshot in get_snapshot_names(self._orig_model_ref):
if os.path.exists(snapshot):
os.remove(snapshot)
logging.info('removed snapshot')
def _add_orig_data(self, sbml_fname):
"""Adds units from original SBML file to reduced model"
......
<?xml version="1.0" encoding="UTF-8"?>
<!-- Created by sbmlxdf version 0.2.3 on 2021-11-03 22:28 with libSBML version 5.19.0. -->
<!-- Created by sbmlxdf version 0.2.5 on 2021-11-05 09:00 with libSBML version 5.19.0. -->
<sbml xmlns="http://www.sbml.org/sbml/level3/version2/core" xmlns:fbc="http://www.sbml.org/sbml/level3/version1/fbc/version2" level="3" version="2" fbc:required="false">
<model metaid="Deniz_model_fba" id="Deniz_model_fba_reduced" name="Deniz_model_fba" substanceUnits="substance" timeUnits="time" extentUnits="substance" fbc:strict="true">
<notes>
@@ -59,7 +59,6 @@
</listOfCompartments>
<listOfSpecies>
<species id="M_aa_c" name="M_aa_c" compartment="c" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:charge="0"/>
<species id="M_aapre_c" name="M_aapre_c" compartment="c" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:charge="0"/>
<species id="M_accoa_c" name="M_accoa_c" compartment="c" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:charge="0"/>
<species id="M_adp_c" name="M_adp_c" compartment="c" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:charge="0"/>
<species id="M_atp_c" name="M_atp_c" compartment="c" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" fbc:charge="0"/>
@@ -184,6 +183,14 @@
<speciesReference species="M_rib_c" stoichiometry="1" constant="true"/>
</listOfProducts>
</reaction>
<reaction id="R_ppp2" name="R_ppp2" reversible="false" fbc:lowerFluxBound="zero_bound" fbc:upperFluxBound="default_ub">
<listOfReactants>
<speciesReference species="M_rib_c" stoichiometry="1" constant="true"/>
</listOfReactants>
<listOfProducts>
<speciesReference species="M_accoa_c" stoichiometry="3" constant="true"/>
</listOfProducts>
</reaction>
<reaction id="R_mrOx" name="R_mrOx" reversible="false" fbc:lowerFluxBound="zero_bound" fbc:upperFluxBound="default_ub">
<listOfReactants>
<speciesReference species="M_accoa_c" stoichiometry="1" constant="true"/>
@@ -205,25 +212,6 @@
<speciesReference species="M_for_c" stoichiometry="1" constant="true"/>
</listOfProducts>
</reaction>
<reaction id="R_mr5" name="R_mr5" reversible="false" fbc:lowerFluxBound="zero_bound" fbc:upperFluxBound="default_ub">
<listOfReactants>
<speciesReference species="M_glc__D_c" stoichiometry="1" constant="true"/>
</listOfReactants>
<listOfProducts>
<speciesReference species="M_aapre_c" stoichiometry="1" constant="true"/>
</listOfProducts>
</reaction>
<reaction id="R_mr7" name="R_mr7" reversible="false" fbc:lowerFluxBound="zero_bound" fbc:upperFluxBound="default_ub">
<listOfReactants>
<speciesReference species="M_aapre_c" stoichiometry="1" constant="true"/>
<speciesReference species="M_nh4_c" stoichiometry="1" constant="true"/>
<speciesReference species="M_atp_c" stoichiometry="1" constant="true"/>
</listOfReactants>
<listOfProducts>
<speciesReference species="M_aa_c" stoichiometry="1" constant="true"/>
<speciesReference species="M_adp_c" stoichiometry="1" constant="true"/>
</listOfProducts>
</reaction>
<reaction id="R_mr9" name="R_mr9" reversible="false" fbc:lowerFluxBound="zero_bound" fbc:upperFluxBound="default_ub">
<listOfReactants>
<speciesReference species="M_lac_c" stoichiometry="1" constant="true"/>
......
# sample_reduce_model.py
# sample script for network reduction of an SBML fbc model
import sys
import os
import logging
import pandas as pd
import networkred
def reduce_model():
# activate logging
logfile = './log/nr.log'
logformat = '%(asctime)s %(levelname)s:%(name)s %(message)s'
logdir = os.path.dirname(logfile)
if not os.path.exists(logdir):
os.mkdir(logdir)
logging.basicConfig(filename=logfile, filemode='w',
level=logging.INFO, format=logformat)
logging.info('Started')
# print some environment information
print('Python version: {:d}.{:d}.{:d}'.format(sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro))
print('networkred version: {:s}'.format(networkred.__version__))
print('working directory:', os.getcwd())
# file names and directories
sbml_dir = 'sample_data/SBML_models'
data_dir = 'sample_data/data'
model_name = 'Deniz_model_fba'
protected_parts_file = 'Deniz_model_fba_pf.xlsx'
# load the original model
sbml_file = os.path.join(sbml_dir, model_name) + '.xml'
red_model = networkred.ReduceModel(sbml_file)
# set protected parts
protect_name = os.path.join(data_dir, protected_parts_file)
print('load reduction parameters from:', protect_name)
protect_rids = None
protect_mids = None
protect_funcs = None
temp_fbc_bounds = None
with pd.ExcelFile(protect_name) as xlsx:
if 'reactions' in xlsx.sheet_names:
protect_rids = pd.read_excel(xlsx, sheet_name='reactions', index_col=0).index.tolist()
if 'metabolites' in xlsx.sheet_names:
protect_mids = pd.read_excel(xlsx, sheet_name='metabolites', index_col=0).index.tolist()
if 'functions' in xlsx.sheet_names:
protect_funcs = pd.read_excel(xlsx, sheet_name='functions', index_col=0)
if 'bounds' in xlsx.sheet_names:
temp_fbc_bounds = pd.read_excel(xlsx, sheet_name='bounds', index_col=0)
red_model.set_reduction_params(protect_rids=protect_rids,
protect_mids=protect_mids,
protect_funcs=protect_funcs,
temp_fbc_bounds=temp_fbc_bounds,
model_base='original')
# prune network (for specific dof, r_min); create snapshots every two removed reactions
print('---- network pruning -----')
red_model.reduce(save_pts=2)
# write reduced model to sbml
reduced_name = os.path.join(sbml_dir, model_name) + '_reduced.xml'
red_model.write_sbml(reduced_name)
print('reduced model converted to SBML:', reduced_name)
logging.info('Finished')
if __name__ == '__main__':
reduce_model()
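# Hypothetical variation (not part of this sample): resume a stalled reduction run
# from its on-disk snapshot and save a new snapshot every two removed reactions:
#   red_model.set_reduction_params(model_base='snapshot')
#   red_model.reduce(save_pts=2)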
@@ -35,8 +35,10 @@ setup(
long_description_content_type='text/markdown',
packages=find_packages(exclude='docs'),
install_requires=['pandas>=1.2.0',
'numpy>=1.20.0',
'cobra>=0.21.0',
'sbmlxdf>=0.2.4'
'sbmlxdf>=0.2.5',
'sympy>=1.8'
],
python_requires=">=3.7",
keywords=['modeling', 'standardization', 'network reduction', 'SBML'],
......