Commit 050a29c1 authored by Jan Hoeckesfeld

added iterset wildcard

parent cc4d7666
@@ -14,31 +14,61 @@ kmer_lengths = config['kmers']
 dataset_inputIDs = {}
 #kmer_lengths = [24]
-def get_inputs(dataset):
-    run_dir = dataset
-    inputIDs, = glob_wildcards('data/input/'+dataset+'/{id}'+config["datasets"][dataset]['input_read_1_ending'])
-    dataset_inputIDs[dataset] = inputIDs
-    possible_params = {'generative_model': expand('data/output/'+run_dir+'/kmers/{kmer}/predictions.probabilistic_gen.tsv',kmer=kmer_lengths),
-        'probabilistic_model': expand('data/output/'+run_dir+'/kmers/{kmer}/predictions.probabilistic_cov.tsv',kmer=kmer_lengths),
-        # if above:
-        'plot_top3_fit': expand('data/output/'+run_dir+'/kmers/{kmer}/{id}_top3fit.svg',kmer=kmer_lengths,id=inputIDs),
-        'distance_model': expand('data/output/'+run_dir+'/kmers/{kmer}/predictions.euclidean.tsv',kmer=kmer_lengths),
+def get_general_inputs(wildcards):
+    run_dir = wildcards.dataset
+    inputIDs, = glob_wildcards('data/input/'+wildcards.dataset+'/{id}'+config["datasets"][wildcards.dataset]['input_read_1_ending'])
+    dataset_inputIDs[wildcards.dataset] = inputIDs
+    possible_params = {
         'assembly_model': expand('data/output/'+run_dir+'/{id}/exactMatches.tsv',id=inputIDs),
         'calc_strand_bias': expand('data/output/'+run_dir+'/{id}/strandbias.txt',id=inputIDs),
         'mapping_diff_analysis': expand('data/output/'+run_dir+'/methodAnalysis/{id}/mapping.comparison',id=inputIDs),
         'map_filtered_reads': expand('data/output/'+run_dir+'/methodAnalysis/{id}/alignmentToGroundTruthType.sorted.bam.bai',id=inputIDs),
         'verifyUniqueness': expand('data/output/kmers/{kmer}/uniquenessTest.tsv',kmer=kmer_lengths),
-        'kmer_stats_analysis': expand('data/auxiliary/'+run_dir+'/kmers/{kmer}/{id}/stats.tsv',kmer=kmer_lengths,id=inputIDs) +
-            expand('data/output/'+run_dir+'/kmers/{kmer}/{id}/spaTypesGroundTruthVennDia.svg',kmer=kmer_lengths,id=inputIDs)
+        'kmer_stats_analysis': expand('data/auxiliary/'+run_dir+'/kmers/{kmer}/{id}/stats.tsv',kmer=kmer_lengths,id=inputIDs)
     }
-    return [possible_params[k] for k in possible_params.keys() if config[k]]
+    return [item for k in possible_params.keys() if config[k] for item in possible_params[k]]
+
+def get_iterset_inputs(wildcards):
+    inputIDs, = glob_wildcards('data/input/'+wildcards.dataset+'/{id}'+config["datasets"][wildcards.dataset]['input_read_1_ending'])
+    possible_params = {
+        'generative_model': expand('data/output/' + wildcards.dataset + '/' + wildcards.iterset + '/kmers/{kmer}/predictions.probabilistic_gen.tsv',kmer=kmer_lengths),
+        'distance_model': expand('data/output/' + wildcards.dataset + '/' + wildcards.iterset + '/kmers/{kmer}/predictions.euclidean.tsv',kmer=kmer_lengths),
+        'probabilistic_model': expand('data/output/' + wildcards.dataset + '/' + wildcards.iterset + '/kmers/{kmer}/predictions.probabilistic_cov.tsv',kmer=kmer_lengths),
+        # if above:
+        'plot_top3_fit': expand('data/output/' + wildcards.dataset + '/' + wildcards.iterset + '/kmers/{kmer}/{id}_top3fit.svg',kmer=kmer_lengths,id=inputIDs),
+        'kmer_stats_analysis': expand('data/output/' + wildcards.dataset + '/' + wildcards.iterset + '/kmers/{kmer}/{id}/spaTypesGroundTruthVennDia.svg',kmer=kmer_lengths,id=inputIDs)
+    }
+    return [item for k in possible_params.keys() if config[k] for item in possible_params[k]]
+
+def use_itersets():
+    if config['probabilistic_model'] and config['itersets']:
+        return config['itersets']
+    return ['O']
 
 rule all:
     input:
-        [get_inputs(dataset) for dataset in config["datasets"].keys()]
+        run_datasets = expand('data/output/{dataset}_summary.md', dataset=config['datasets'].keys())
+
+rule run_dataset:
+    input:
+        general = get_general_inputs,
+        summarys = expand('data/auxiliary/{dataset}/{iterset}_summary.md', iterset=use_itersets(), allow_missing=True)
+    output:
+        summary = 'data/output/{dataset}_summary.md'
+    # TODO create summary
+    shell:
+        'touch {output.summary}'
+
+rule run_iterset:
+    input:
+        get_iterset_inputs
+    output:
+        out = 'data/auxiliary/{dataset}/{iterset}_summary.md'
+    # TODO create summary
+    shell:
+        'touch {output.out}'
 
 ##### load rules #####
 include: "rules/assembly.smk"
...
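The new top-level rules fan each dataset out over one or more iteration sets. A minimal, stand-alone sketch of that mechanism (not part of the commit; the config values are illustrative stand-ins, only Snakemake's `expand` helper is assumed):

```python
# Illustrative sketch only; config values below are hypothetical, not from this repository.
from snakemake.io import expand  # Snakemake's target-expansion helper

config = {'probabilistic_model': True, 'itersets': ['O', 'V', 'OnV']}

def use_itersets():
    # Mirror of the new helper: fall back to the single set 'O' unless the
    # probabilistic model is enabled and an itersets list is configured.
    if config['probabilistic_model'] and config['itersets']:
        return config['itersets']
    return ['O']

# allow_missing=True expands only {iterset}; {dataset} survives as a wildcard
# and is resolved later when rule run_dataset is instantiated per dataset.
print(expand('data/auxiliary/{dataset}/{iterset}_summary.md',
             iterset=use_itersets(), allow_missing=True))
# ['data/auxiliary/{dataset}/O_summary.md',
#  'data/auxiliary/{dataset}/V_summary.md',
#  'data/auxiliary/{dataset}/OnV_summary.md']
```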
@@ -94,7 +94,7 @@ deviationCutoff : 2.5
 skipMapping: False
 plot_top3_fit : False
 #choose the iterationset either O, V, OuV (O union V) or OnV (O intersect V)
-itersetType : O
+itersets: [O,V,OnV]
 ###Blast Parameter
 blast_word_size : 4
...
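For orientation, the set names in the config comment read as simple set algebra over the two k-mer collections O and V. A hedged Python sketch of that naming convention (the function and sample k-mers are hypothetical; how the workflow actually materialises these sets is not shown in this diff):

```python
# Hypothetical illustration of the naming in the config comment:
# O and V are two k-mer sets; OuV is their union, OnV their intersection.
def resolve_iterset(name: str, O: set, V: set) -> set:
    return {'O': O, 'V': V, 'OuV': O | V, 'OnV': O & V}[name]

print(resolve_iterset('OnV', {'AAAT', 'AACG'}, {'AACG', 'AAGT'}))  # {'AACG'}
```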
@@ -83,9 +83,9 @@ rule estimateCoverageAlignment:
 rule calcPriorProbabilitiesCoverage:
     input:
-        likelihoods = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/likelihoods_cov.json'
+        likelihoods = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/likelihoods_cov.json'
     output:
-        priorFilePath = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/prior_cov.txt'
+        priorFilePath = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/prior_cov.txt'
     params:
         k = lambda wildcards: wildcards.kmer,
         dps = config['dps'],
@@ -95,7 +95,7 @@ rule calcPriorProbabilitiesCoverage:
         mem = '2G',
         walltime = '00:05:30'
     log:
-        'logs/{dataset}/probabilistic/kmers/{kmer}/{id}/calcPrior_cov.log'
+        'logs/{dataset}/{iterset}/probabilistic/kmers/{kmer}/{id}/calcPrior_cov.log'
     conda:
         '../envs/biopythonworkbench.yaml'
     script:
@@ -103,10 +103,10 @@ rule calcPriorProbabilitiesCoverage:
 rule calcProbabilitiesCoverage:
     input:
-        likelihoods = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/likelihoods_cov.json',
-        prior = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/prior_cov.txt'
+        likelihoods = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/likelihoods_cov.json',
+        prior = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/prior_cov.txt'
     output:
-        probabilities = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/scores.probabilistic_cov.tsv'
+        probabilities = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/scores.probabilistic_cov.tsv'
     params:
         dps = config['dps'],
         # cluster execution
@@ -117,7 +117,7 @@ rule calcProbabilitiesCoverage:
     conda:
         '../envs/biopythonworkbench.yaml'
     log:
-        'logs/{dataset}/probabilistic/kmers/{kmer}/{id}/probabilities_cov.log'
+        'logs/{dataset}/{iterset}/probabilistic/kmers/{kmer}/{id}/probabilities_cov.log'
     script:
         '../scripts/calcSpaTypeProbabilities.py'
@@ -125,10 +125,10 @@ rule calcProbabilitiesCoverage:
 rule createFitnessPlots:
     input:
         counts = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/alignment.counts.json',
-        probabilities = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/scores.probabilistic_cov.tsv',
+        probabilities = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/scores.probabilistic_cov.tsv',
         ratios = 'data/auxiliary/kmers/{kmer}/spaSequencesRatios.json'
     output:
-        report('data/output/{dataset}/kmers/{kmer}/{id}_top3fit.svg',category='Coverage-Based Method',caption='../report/fitnessPlot.rst')
+        report('data/output/{dataset}/{iterset}/kmers/{kmer}/{id}_top3fit.svg',category='Coverage-Based Method',caption='../report/fitnessPlot.rst')
     params:
         dps = config['dps'],
         # cluster execution
...
@@ -3,7 +3,7 @@ rule distance:
         readProfile = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/alignment.profile.json',
         spaProfiles = 'data/auxiliary/kmers/{kmer}/spaSequences.kmerprofiles.json'
     output:
-        'data/auxiliary/{dataset}/kmers/{kmer}/{id}/scores.euclidean.tsv'
+        'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/scores.euclidean.tsv'
     conda:
         '../envs/biopythonworkbench.yaml'
     params:
...
@@ -169,11 +169,11 @@ rule createKmerDistributionGroundTruth_COVERAGE_BASED:
     input:
         expectedCounts = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/expected_counts.json',
         observedCounts = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/alignment.counts.json',
-        probabilities = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/scores.probabilistic_cov.tsv',
+        probabilities = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/scores.probabilistic_cov.tsv',
         kmerError = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/kmer_error.txt'
     output:
-        errors = 'data/output/{dataset}/methodAnalysis/{kmer}/{id}/kmerErrorDistributions.svg',
-        deviations = 'data/output/{dataset}/methodAnalysis/{kmer}/{id}/countDeviations.svg'
+        errors = 'data/output/{dataset}/{iterset}/methodAnalysis/{kmer}/{id}/kmerErrorDistributions.svg',
+        deviations = 'data/output/{dataset}/{iterset}/methodAnalysis/{kmer}/{id}/countDeviations.svg'
     params:
         gt = lambda wildcards : getGroundTruthType(wildcards.id),
         #cluster execution
@@ -184,7 +184,7 @@ rule createKmerDistributionGroundTruth_COVERAGE_BASED:
     conda:
         '../envs/biopythonworkbench.yaml'
     log:
-        'logs/{dataset}/methodAnalysis/{kmer}/{id}/kmerErrorDistributions.svg'
+        'logs/{dataset}/{iterset}/methodAnalysis/{kmer}/{id}/kmerErrorDistributions.svg'
     script:
         '../scripts/createKmerErrorDistributionPlots.py'
@@ -193,16 +193,16 @@ rule likelihoodAnalysis_COVERAGE_BASED:
     input:
         expectedCounts = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/expected_counts.json',
         observedCounts = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/alignment.counts.json',
-        probabilities = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/scores.probabilistic_cov.tsv',
+        probabilities = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/scores.probabilistic_cov.tsv',
         kmerError = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/kmer_error.txt'
     output:
-        likelihoodAnalysis = 'data/output/{dataset}/methodAnalysis/{kmer}/{id}/likelihoodAnalysis.txt'
+        likelihoodAnalysis = 'data/output/{dataset}/{iterset}/methodAnalysis/{kmer}/{id}/likelihoodAnalysis.txt'
     params:
         gt = lambda wildcards : getGroundTruthType(wildcards.id)
     conda:
         '../envs/biopythonworkbench.yaml'
     log:
-        'logs/{dataset}/methodAnalysis/{kmer}/{id}/likelihoodAnalysis.txt'
+        'logs/{dataset}/{iterset}/methodAnalysis/{kmer}/{id}/likelihoodAnalysis.txt'
     script:
         '../scripts/likelihoodBreakdown.py'
@@ -337,15 +337,15 @@ rule createSpaTypeVennDiagram:
     input:
         expected = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/expected_counts.json',
         observed = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/alignment.counts.json',
-        scores = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/scores.probabilistic_cov.tsv'
+        scores = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/scores.probabilistic_cov.tsv'
     output:
-        venngtt = 'data/output/{dataset}/kmers/{kmer}/{id}/spaTypesGroundTruthVennDia.svg',
-        venntopsix = 'data/output/{dataset}/kmers/{kmer}/{id}/spaTypesTopSixVennDia.svg',
-        vennrandomsix = 'data/output/{dataset}/kmers/{kmer}/{id}/spaTypesRandomSixVennDia.svg'
+        venngtt = 'data/output/{dataset}/{iterset}/kmers/{kmer}/{id}/spaTypesGroundTruthVennDia.svg',
+        venntopsix = 'data/output/{dataset}/{iterset}/kmers/{kmer}/{id}/spaTypesTopSixVennDia.svg',
+        vennrandomsix = 'data/output/{dataset}/{iterset}/kmers/{kmer}/{id}/spaTypesRandomSixVennDia.svg'
     params:
         gtt = lambda wildcards : getGroundTruthType(wildcards.id)
     log:
-        'logs/{dataset}/probabilistic/kmers/{kmer}/{id}/spaTypeVennDia.log'
+        'logs/{dataset}/{iterset}/probabilistic/kmers/{kmer}/{id}/spaTypeVennDia.log'
     conda:
         '../envs/biopythonworkbench.yaml'
     script:
...
@@ -37,7 +37,7 @@ rule calcProbabilities:
         likelihoods = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/likelihoods.json',
         prior = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/prior.txt'
     output:
-        probabilities = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/scores.probabilistic_gen.tsv'
+        probabilities = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/scores.probabilistic_gen.tsv'
     params:
         dps = config['dps'],
         cpus = '1',
@@ -47,7 +47,7 @@ rule calcProbabilities:
     conda:
         '../envs/biopythonworkbench.yaml'
     log:
-        'logs/{dataset}/probabilistic/kmers/{kmer}/{id}/probabilities.log'
+        'logs/{dataset}/{iterset}/probabilistic/kmers/{kmer}/{id}/probabilities.log'
     script:
         '../scripts/calcSpaTypeProbabilities.py'
@@ -69,17 +69,17 @@ rule calcLikelihoods:
         V_kmers_distances = 'data/auxiliary/kmers/{kmer}/V_kmers.distances.npz',
         V_kmers = 'data/auxiliary/kmers/{kmer}/V_kmers.json'
     output:
-        likelihoods = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/likelihoods_cov.json',
-        unexpectedLikelihoods = 'data/auxiliary/{dataset}/kmers/{kmer}/{id}/unexpected_likelihoods_cov.json'
+        likelihoods = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/likelihoods_cov.json',
+        unexpectedLikelihoods = 'data/auxiliary/{dataset}/{iterset}/kmers/{kmer}/{id}/unexpected_likelihoods_cov.json'
         #diffs = 'data/auxiliary/kmers/{kmer}/{id}/kmer_diff.tsv'
     log:
-        'logs/{dataset}/probabilistic/kmers/{kmer}/{id}/likelihoods_cov.log'
+        'logs/{dataset}/{iterset}/probabilistic/kmers/{kmer}/{id}/likelihoods_cov.log'
     benchmark:
-        'benchmarks/{dataset}/probabilistic/kmers/{kmer}/{id}/calcLikelihoodsCoverageBasedModel.txt'
+        'benchmarks/{dataset}/{iterset}/probabilistic/kmers/{kmer}/{id}/calcLikelihoodsCoverageBasedModel.txt'
     params:
         e = (lambda wildcards,input : extractTsvValue(input.kmerError,0)),
        deviationCutoff = (lambda wildcards,input : round(config['deviationCutoff']*extractCoverageEstimateFile(input.kmerCoverageEstimate,config))),
-        itersetType = config['itersetType'],
+        itersetType = lambda wildcards: wildcards.iterset,
         #cluster exectuion
         cpus = '8',
         mem = '15G',
...
@@ -114,9 +114,9 @@ rule sort_bwa:
 rule summarize:
     input:
-        results = lambda wildcards: expand('data/auxiliary/{dataset}/kmers/{{kmer}}/{id}/scores.{{mode}}.tsv',id=dataset_inputIDs[wildcards.dataset], allow_missing=True)
+        results = lambda wildcards: expand('data/auxiliary/{dataset}/{iterset}/kmers/{{kmer}}/{id}/scores.{{mode}}.tsv',id=dataset_inputIDs[wildcards.dataset], allow_missing=True)
     output:
-        summary = report('data/output/{dataset}/kmers/{kmer}/predictions.{mode}.tsv',category="Spa-Type Predictions",caption="../report/prediction.snk")
+        summary = report('data/output/{dataset}/{iterset}/kmers/{kmer}/predictions.{mode}.tsv',category="Spa-Type Predictions",caption="../report/prediction.snk")
     params:
         # cluster execution
         cpus = '1',
@@ -130,10 +130,10 @@ rule summarize:
 rule metaSummarize:
     input:
-        summary = expand('data/output/{dataset}/kmers/{kmer}/predictions.{{mode}}.tsv',kmer=kmer_lengths, allow_missing=True),
+        summary = expand('data/output/{dataset}/{iterset}/kmers/{kmer}/predictions.{{mode}}.tsv',kmer=kmer_lengths, allow_missing=True),
         groundTruth = 'data/input/' + config['ground_truth']
     output:
-        meta = 'data/output/{dataset}/metaSummary.{mode}.tsv'
+        meta = 'data/output/{dataset}/{iterset}/metaSummary.{mode}.tsv'
     params:
         # cluster execution
         cpus = '1',
@@ -143,7 +143,7 @@ rule metaSummarize:
     conda:
         '../envs/biopythonworkbench.yaml'
     log:
-        'logs/{dataset}/metaSummary.{mode}.log'
+        'logs/{dataset}/{iterset}/metaSummary.{mode}.log'
     script:
         '../scripts/metaSummarize.py'
...