Commit c2baa9c2 authored by caminha

Merge branch 'ndnSIM-ns-3.29' of icube-forge.unistra.fr:rcaminha/nanoas-proof-of-work into ndnSIM-ns-3.29
parents 990a7592 2522abfd
*.trace
experiments/
cluster_experiments
/*.err
/*.out
*.diff
*.orig
@@ -16,4 +16,4 @@ NAME="dsubs.$SLURM_JOB_ID"
[[ -v SLURM_ARRAY_JOB_ID ]] && NAME="$NAME.aid$SLURM_ARRAY_JOB_ID"
[[ -v SLURM_RESTART_COUNT ]] && NAME=$NAME.r$SLURM_RESTART_COUNT
-python3 scripts/run_multiple.py --nruns=$NRUNS $NAME random-domain-streams --appScenario=DirectSubscriptions --nConsumers='100 150 200 250 300'
+python3 scripts/run_multiple.py --nruns=$NRUNS $NAME random-domain-streams --appScenario=DirectSubscriptions --nConsumers='100 175 250 325 400'
#!/bin/bash
#SBATCH -N 1 --exclusive
#SBATCH -D /b/home/icube/rcaminha/nas-ndnsim
#SBATCH -o dsubsV2.%j.out
#SBATCH -e dsubsV2.%j.err
#SBATCH -J dsubsV2
# Default to 5 runs unless a run count is given as the first argument
if [ -z "$1" ]; then
    NRUNS=5
else
    NRUNS="$1"
fi
# Build a unique result name from the SLURM job, array, and restart metadata
NAME="dsubs.$SLURM_JOB_ID"
[[ -v SLURM_ARRAY_JOB_ID ]] && NAME="$NAME.aid$SLURM_ARRAY_JOB_ID"
[[ -v SLURM_RESTART_COUNT ]] && NAME="$NAME.r$SLURM_RESTART_COUNT"
python3 scripts/run_multiple.py --nruns=$NRUNS $NAME random-domain-streams --appScenario=DirectSubscriptions --nConsumers='325 400'
#!/bin/bash
#SBATCH -N 1 --exclusive
#SBATCH -D /b/home/icube/rcaminha/nas-ndnsim
#SBATCH -o dsubsV3.%j.out
#SBATCH -e dsubsV3.%j.err
#SBATCH -J dsubsV3
if [ -z "$1" ]; then
NRUNS=5
else
NRUNS="$1"
fi
NAME="dsubs.$SLURM_JOB_ID"
[[ -v SLURM_ARRAY_JOB_ID ]] && NAME="$NAME.aid$SLURM_ARRAY_JOB_ID"
[[ -v SLURM_RESTART_COUNT ]] && NAME=$NAME.r$SLURM_RESTART_COUNT
python3 scripts/run_multiple.py --nruns=$NRUNS $NAME random-domain-streams --appScenario=DirectSubscriptions --nConsumers='250 325 400'
#!/bin/bash
#SBATCH -N 1 --exclusive
#SBATCH -D /b/home/icube/rcaminha/nas-ndnsim
#SBATCH -o dsubsV4.%j.out
#SBATCH -e dsubsV4.%j.err
#SBATCH -J dsubsV4
if [ -z "$1" ]; then
NRUNS=5
else
NRUNS="$1"
fi
NAME="dsubs.$SLURM_JOB_ID"
[[ -v SLURM_ARRAY_JOB_ID ]] && NAME="$NAME.aid$SLURM_ARRAY_JOB_ID"
[[ -v SLURM_RESTART_COUNT ]] && NAME=$NAME.r$SLURM_RESTART_COUNT
python3 scripts/run_multiple.py --nruns=$NRUNS $NAME random-domain-streams --appScenario=DirectSubscriptions --nConsumers='400'
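For context: each script above delegates the actual sweep to scripts/run_multiple.py, which is not shown in this diff. The following is a hypothetical sketch of such a driver, inferred only from the command lines above; the argument names --nruns, --appScenario and --nConsumers appear in the scripts, but the positional arguments, the --RngRun seeding, and the ./waf invocation are assumptions, not the repository's actual code.

#!/usr/bin/env python3
# Hypothetical sweep driver in the spirit of scripts/run_multiple.py (assumed interface).
import argparse
import itertools
import subprocess

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--nruns", type=int, default=5)
    parser.add_argument("name")                      # e.g. dsubs.$SLURM_JOB_ID
    parser.add_argument("scenario")                  # e.g. random-domain-streams
    parser.add_argument("--appScenario", default="RandomTrees")
    parser.add_argument("--nConsumers", default="100",
                        help="space-separated list of consumer counts")
    args = parser.parse_args()

    # One simulation per (consumer count, run index) pair.
    for n_consumers, run in itertools.product(args.nConsumers.split(),
                                              range(args.nruns)):
        cmd = ["./waf", "--run",
               f"{args.scenario} --appScenario={args.appScenario} "
               f"--nConsumers={n_consumers} --RngRun={run}"]
        print(f"[{args.name}] nConsumers={n_consumers} run={run}")
        subprocess.run(cmd, check=True)

if __name__ == "__main__":
    main()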
@@ -16,4 +16,4 @@ NAME="rtrees.$SLURM_JOB_ID"
[[ -v SLURM_ARRAY_JOB_ID ]] && NAME="$NAME.aid$SLURM_ARRAY_JOB_ID"
[[ -v SLURM_RESTART_COUNT ]] && NAME=$NAME.r$SLURM_RESTART_COUNT
-python3 scripts/run_multiple.py --nruns=$NRUNS $NAME random-domain-streams --appScenario=RandomTrees --nConsumers='100 150 200 250 300'
+python3 scripts/run_multiple.py --nruns=$NRUNS $NAME random-domain-streams --appScenario=RandomTrees --nConsumers='100 175 250 325 400'
@@ -51,7 +51,7 @@ main(int argc, char *argv[])
randomTransformationCount = 3,
randomTransformationParamCount = 3,
subscriptionIntervalCount = 4;
-double percSystemActivation = 0.75;
+double percSystemActivation = 0.9;
std::string appScenario = "RandomTrees",
subEquivalence = "multiplier";
This diff is collapsed.
This diff is collapsed.
@@ -2,6 +2,7 @@ import re
import pandas as pd
from os import path
import logging
+import numpy as np
def get_node_type(node_name):
client_name_pattern = r"^C\d+$"
@@ -50,6 +51,13 @@ def read_statistics(dir_path = '', read_l3_dataset=True, read_cs_dataset=True, r
cs_dataset = pd.read_csv(path.join(dir_path, "cs-trace.trace"), sep='\s+')
# cs_dataset['Type'] = cs_dataset.loc[:, 'Type'].astype('category')
cs_dataset['NodeType'] = pd.Series(map(get_node_type, cs_dataset['Node']), dtype='category')
+cache_size_mask = cs_dataset['Type'].str.contains('CacheSizeByte')
+tmp = cs_dataset[cache_size_mask].copy(deep=True)
+tmp['Type'] = 'CacheSizeMbyte'
+tmp['Packets'] = tmp['Packets'].divide(1000.0)
+cs_dataset = pd.concat([cs_dataset, tmp], ignore_index=True).sort_values(['Time', 'Node'])
else:
cs_dataset = None
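The added block above duplicates every CacheSizeByte row under a second Type, so the raw and rescaled series coexist in one long-format table and can be selected by Type downstream. One caveat: dividing a byte count by 1000.0 yields kilobytes, so a true bytes-to-megabytes conversion would divide by 1e6 (the l3 block further down divides kilobytes by 1000, which is consistent). A self-contained illustration of the duplication pattern on made-up data:

import pandas as pd

# Made-up long-format trace, one metric type per row, as in cs-trace.trace.
cs = pd.DataFrame({
    "Time": [1, 1, 2, 2],
    "Node": ["C0", "R0", "C0", "R0"],
    "Type": ["CacheSizeByte"] * 4,
    "Packets": [2_000_000, 4_500_000, 2_500_000, 4_000_000],
})

# Copy the byte rows under a new Type with rescaled values
# (1e6 bytes per megabyte), keeping both series in the same table.
mb = cs[cs["Type"] == "CacheSizeByte"].copy()
mb["Type"] = "CacheSizeMbyte"
mb["Packets"] = mb["Packets"] / 1e6

cs = pd.concat([cs, mb], ignore_index=True).sort_values(["Time", "Node"])
print(cs)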
@@ -71,6 +79,8 @@ def read_statistics(dir_path = '', read_l3_dataset=True, read_cs_dataset=True, r
l3_dataset = pd.read_csv(path.join(dir_path, "l3-rate-trace.trace"), sep='\s+')
# l3_dataset['Type'] = l3_dataset['Type'].astype('category')
l3_dataset['NodeType'] = pd.Series(map(get_node_type, l3_dataset['Node']), dtype='category')
+l3_dataset['Megabytes'] = l3_dataset['Kilobytes'].divide(1000)
+l3_dataset['MegabytesRaw'] = l3_dataset['KilobytesRaw'].divide(1000)
else:
l3_dataset = None
@@ -85,3 +95,8 @@ def read_statistics(dir_path = '', read_l3_dataset=True, read_cs_dataset=True, r
logger.info("Finished reading statistics")
return sim_parameters, cs_dataset, stream_hits_dataset, l3_dataset, consumer_dataset
+def get_iqr_outliers(series):
+    q1, q3 = np.quantile(series, [.25, .75])
+    iqr = q3 - q1
+    return (series < q1 - iqr*1.5) | (series > q3 + iqr*1.5)
\ No newline at end of file
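The new get_iqr_outliers helper returns a boolean mask flagging values outside Tukey's 1.5 × IQR fences. Typical use is to inspect or drop outlying samples from a trace column; a small self-contained example, repeating the helper on made-up data:

import numpy as np
import pandas as pd

def get_iqr_outliers(series):
    # Tukey fences: flag values below Q1 - 1.5*IQR or above Q3 + 1.5*IQR.
    q1, q3 = np.quantile(series, [.25, .75])
    iqr = q3 - q1
    return (series < q1 - iqr*1.5) | (series > q3 + iqr*1.5)

delays = pd.Series([1.0, 1.1, 0.9, 1.2, 1.0, 9.5])  # made-up latency samples
mask = get_iqr_outliers(delays)
print(delays[mask])   # -> 9.5, the only point beyond the upper fence
print(delays[~mask])  # series with the outlier removed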