summaryrefslogtreecommitdiffstats
path: root/Src/PyCatcher/src/evaluators.py
diff options
context:
space:
mode:
authorTom2012-05-21 18:30:02 +0200
committerTom2012-05-21 18:30:02 +0200
commit818bd7c053bb9c17f323134a244e6822fa02b99f (patch)
treeade9ada0879c08a6ae329287bf1f4301fb1d849d /Src/PyCatcher/src/evaluators.py
parentfinished experiments section apart from tables where data is needed and appendix (diff)
downloadimsi-catcher-detection-818bd7c053bb9c17f323134a244e6822fa02b99f.tar.gz
imsi-catcher-detection-818bd7c053bb9c17f323134a244e6822fa02b99f.tar.xz
imsi-catcher-detection-818bd7c053bb9c17f323134a244e6822fa02b99f.zip
added sections on paging and paging feature in c code, lot of minor changes in tex files
Diffstat (limited to 'Src/PyCatcher/src/evaluators.py')
-rw-r--r--Src/PyCatcher/src/evaluators.py54
1 files changed, 43 insertions, 11 deletions
diff --git a/Src/PyCatcher/src/evaluators.py b/Src/PyCatcher/src/evaluators.py
index 719f996..759d400 100644
--- a/Src/PyCatcher/src/evaluators.py
+++ b/Src/PyCatcher/src/evaluators.py
@@ -1,14 +1,10 @@
from rules import RuleResult
+from settings import Rule_Groups, Rule_Weights
class EvaluatorSelect:
    """Symbolic constants naming the available evaluator strategies.

    Used by callers to pick which Evaluator subclass should aggregate
    the per-rule results into a final verdict.
    """

    CONSERVATIVE = 0  # single decisive rule (ConservativeEvaluator)
    WEIGHTED = 1      # per-rule weighting (WeightedEvaluator)
    GROUP = 2         # per-group majority vote (GroupEvaluator)
class Evaluator:
@@ -35,11 +31,47 @@ class ConservativeEvaluator(Evaluator):
break
return final_result, {'Decision founded on': decision_rule}
class WeightedEvaluator(Evaluator):
    """Evaluator intended to weight individual rule results.

    NOTE(review): the aggregation logic is not implemented yet -- the loop
    body is a placeholder, so ``evaluate`` currently returns ``None``.
    Presumably the weights will come from ``Rule_Weights`` (imported at the
    top of the file) -- confirm when the implementation lands.
    """

    identifier = 'Weighted Evaluator'

    def evaluate(self, result_list):
        """Placeholder evaluation over (rule, evaluation) pairs.

        Bug fix: the original iterated ``reseult_list`` (a typo for the
        ``result_list`` parameter), which raised NameError on every call.
        """
        for rule, evaluation in result_list:
            pass
class GroupEvaluator(Evaluator):
    """Evaluator that aggregates rule results group by group.

    Every group listed in ``Rule_Groups`` is first reduced to a single
    RuleResult by majority vote; the overall verdict is then the most
    severe result found among the groups.
    """

    identifier = 'Group Evaluator'

    def evaluate(self, result_list):
        """Return the overall RuleResult for ``result_list``.

        ``result_list`` is indexed by rule below, so it is assumed to map
        rule identifiers to their RuleResult -- TODO confirm against caller.
        """
        group_results = []
        for group in Rule_Groups:
            group_results.append(
                self.evaluate_group_results(
                    self.convert_to_group_result_list(group, result_list)))

        # Severity order: any CRITICAL group wins, then any WARNING,
        # otherwise everything was OK.
        if group_results.count(RuleResult.CRITICAL) > 0:
            return RuleResult.CRITICAL
        elif group_results.count(RuleResult.WARNING) > 0:
            return RuleResult.WARNING
        else:
            return RuleResult.OK

    def convert_to_group_result_list(self, group, result_list):
        """Collect the results of all rules belonging to ``group``.

        Bug fix: the original appended to the undefined name
        ``group_results`` (a local of ``evaluate``), raising NameError and
        otherwise returning an always-empty list; it must append to the
        local ``group_result_list``.
        """
        group_result_list = []
        for rule in group:
            group_result_list.append(result_list[rule])
        return group_result_list

    def evaluate_group_results(self, results):
        """Reduce one group's results to a single RuleResult by majority.

        Ties are broken towards the more severe outcome, because the
        branches are tested in the order CRITICAL, WARNING, OK.  Note that
        an empty ``results`` list yields CRITICAL (all counts are zero, so
        the first branch matches).
        """
        oks = results.count(RuleResult.OK)
        warnings = results.count(RuleResult.WARNING)
        criticals = results.count(RuleResult.CRITICAL)
        if criticals >= oks and criticals >= warnings:
            return RuleResult.CRITICAL
        elif warnings >= oks and warnings >= criticals:
            return RuleResult.WARNING
        elif oks >= criticals and oks >= warnings:
            return RuleResult.OK
        else:
            # Unreachable: one of the three counts is always a maximum.
            return RuleResult.IGNORE