Commit 6d157f9c, authored Nov 16, 2016 by Imène Lajili

made changes in isotonic regression widgets

parent ad2e75d3

Changes: 1 file (context-aware/library.py)
...
...
@@ -76,8 +76,6 @@ def ca_optimal_binary_threshold_selection(input_dict):
    performance = input_dict['score']
    method = input_dict['method']
    #print method
    cost_fn = input_dict['cost_false_neg']
    cost_fp = input_dict['cost_false_pos']
    list_score = []
    labels = ''
    n = len(performance['actual'])
...
...
@@ -89,51 +87,38 @@ def ca_optimal_binary_threshold_selection(input_dict):
    #print counter_neg
    counter_pos = len([score for score in list_score if score[0] == 1])
    #print counter_pos
    output_dict['bin_thres'] = find_best_roc_weight(method, sorted_score, counter_pos, counter_neg, cost_fn, cost_fp)
    output_dict['bin_thres'] = find_best_roc_weight(method, sorted_score, counter_pos, counter_neg)
    return output_dict

def find_best_roc_weight(method, a_list, a_num_positives, a_num_negatives, cost_fn, cost_fp):
def find_best_roc_weight(method, a_list, a_num_positives, a_num_negatives):
    previous = float('inf')
    xpos = 0
    xneg = a_num_negatives
    #return the best threshold
    the_best_value = get_value(method, xpos, xneg, a_num_positives, a_num_negatives, cost_fn, cost_fp)
    #print the_best_value
    #print the_best_value
    #at the beginning the best is inf
    the_best_value = get_value(method, xpos, xneg, a_num_positives, a_num_negatives)
    best = previous
    for the_elt in a_list:
        the_roc = the_elt
        current = the_roc[1]
        #print current
        #print the_roc
        if current != previous:
            possible_best_value = get_value(method, xpos, xneg, a_num_positives, a_num_negatives, cost_fn, cost_fp)
            #print '%f > %f' %(possible_best_value,the_best_value)
            possible_best_value = get_value(method, xpos, xneg, a_num_positives, a_num_negatives)
            print '%f > %f' % (possible_best_value, the_best_value)
            if possible_best_value > the_best_value:
                the_best_value = possible_best_value
                #print '%f -> %f' %(best,(previous + current) / float(2))
                print '%f -> %f' % (best, (previous + current) / float(2))
                best = (previous + current) / float(2)
        if the_roc[0] == 1:
            xpos += 1
            '''else:
            xneg -= 1'''
            previous = current;
            #print best
            print xpos
            print xneg
            possible_best_value = get_value(method, xpos, xneg, a_num_positives, a_num_negatives, cost_fn, cost_fp)
            print possible_best_value
        else:
            xneg -= 1
            previous = current;
            possible_best_value = get_value(method, xpos, xneg, a_num_positives, a_num_negatives)
            if possible_best_value > the_best_value:
                the_best_value = possible_best_value
                best = (previous + float('-inf')) / float(2)
    print previous
    #print best
    best = (previous + float('-inf')) / float(2)
    return best

def get_value(method, TP, TN, P, N, CN, CP):
def get_value(method, TP, TN, P, N):
    if method == 'accuracy':
        accuracy = (TP + TN) / float(P + N)
        return accuracy
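The hunk above shows find_best_roc_weight walking through (label, score) pairs in ROC order and keeping the midpoint between consecutive distinct scores that maximises the chosen metric; the commit drops the cost_fn/cost_fp arguments from that walk. As a rough, self-contained illustration of the same sweep (accuracy only, with made-up names, not the widget's exact code):

# Illustrative sketch only -- not the widget's exact code.
def sweep_best_threshold(pairs):
    """pairs: list of (label, score) with label in {0, 1}.
    Returns the score threshold that maximizes accuracy."""
    pairs = sorted(pairs, key=lambda p: p[1], reverse=True)
    pos_total = sum(1 for label, _ in pairs if label == 1)
    neg_total = len(pairs) - pos_total
    tp, fp = 0, 0
    best_acc = neg_total / float(len(pairs))   # threshold +inf: predict all negative
    best_thr = float('inf')
    prev_score = float('inf')
    for label, score in pairs:
        if score != prev_score:
            # tp/fp currently count items with score above the candidate cut
            acc = (tp + (neg_total - fp)) / float(len(pairs))
            if acc > best_acc:
                best_acc = acc
                best_thr = (prev_score + score) / 2.0
        if label == 1:
            tp += 1
        else:
            fp += 1
        prev_score = score
    # also consider classifying everything as positive
    acc = pos_total / float(len(pairs))
    if acc > best_acc:
        best_thr = float('-inf')
    return best_thr

print(sweep_best_threshold([(1, 0.9), (1, 0.7), (0, 0.4), (0, 0.2)]))  # 0.55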
...
...
@@ -145,17 +130,6 @@ def get_value(method, TP, TN, P, N,CN,CP):
        recall = TP / float(P)
        if method == 'recall':
            return recall
    Cn = N / (P + N)
    Cp = P / (P + N)
    '''print FP
    print TN
    print FN
    print TP'''
    #print TP
    #print type (FP/(FP+TN))
    Cost = ((FP / (FP + TN)) * float(CN)) + ((FN / (TP + FN)) * float(CP))
    if method == 'Cost':
        return Cost
    if TP + FP > 0:
        precision = TP / float(TP + FP)
        if method == 'precision':
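For reference, the metrics get_value switches on can be written compactly from the confusion counts. The sketch below is illustrative (the argument names and the pairing of each error rate with its own cost are assumptions, not necessarily the widget's exact behaviour) and includes the cost-weighted error that this commit removes:

# Illustrative sketch of the confusion-matrix metrics used above.
def confusion_metric(method, TP, TN, FP, FN, cost_fn=1.0, cost_fp=1.0):
    P = TP + FN          # actual positives
    N = TN + FP          # actual negatives
    if method == 'accuracy':
        return (TP + TN) / float(P + N)
    if method == 'recall':
        return TP / float(P) if P else 0.0
    if method == 'precision':
        return TP / float(TP + FP) if TP + FP else 0.0
    if method == 'Cost':
        # false-positive rate weighted by cost_fp plus
        # false-negative rate weighted by cost_fn
        return (FP / float(FP + TN)) * cost_fp + (FN / float(TP + FN)) * cost_fn
    raise ValueError('unknown method: %s' % method)

print(confusion_metric('accuracy', TP=40, TN=45, FP=5, FN=10))  # 0.85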
...
...
@@ -174,19 +148,21 @@ def Context_Bcalibration(input_dict):
    old_actual = non_calibrated_scores['actual']
    old_predicted = non_calibrated_scores['predicted']
    g2 = sort_list2(old_predicted, old_actual)
    print g2
    #print g2
    Z = g2['actual']
    print len(Z)
    #print len(Z)
    #print Z
    Z2 = g2['predicted']
    print len(Z2)
    output_dict = {}
    list3 = []
    for i in Z:
        list3.append(i)
    #print list3
    L = search_for_position(list3)
    #print L
    #print old_actual
    while non_decreasing(L) != True:
        L = search_for_position(L)
    g2 = sort_list2(old_predicted, old_actual)
    output_dict['builded_scores'] = {'noncalibrated_scr': Z2, 'class': Z, 'calibrated_scr': L}
    print output_dict
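The new while loop keeps re-running search_for_position until the calibrated sequence is non-decreasing, which appears to be pool-adjacent-violators style isotonic smoothing. A minimal standalone sketch of that pooling step, assuming equal weights and illustrative names, is:

# Illustrative pool-adjacent-violators sketch (equal weights assumed).
def pava_non_decreasing(values):
    """Return a non-decreasing sequence obtained by averaging
    adjacent blocks that violate the ordering."""
    blocks = [[v, 1] for v in values]  # [block mean, block size]
    i = 0
    while i < len(blocks) - 1:
        if blocks[i][0] > blocks[i + 1][0]:
            # merge the violating pair into one block with the pooled mean
            mean_a, n_a = blocks[i]
            mean_b, n_b = blocks[i + 1]
            pooled = (mean_a * n_a + mean_b * n_b) / float(n_a + n_b)
            blocks[i:i + 2] = [[pooled, n_a + n_b]]
            i = max(i - 1, 0)  # merging may create a new violation to the left
        else:
            i += 1
    out = []
    for mean, size in blocks:
        out.extend([mean] * size)
    return out

print(pava_non_decreasing([0, 1, 0, 1, 1, 0]))
# [0, 0.5, 0.5, 0.666..., 0.666..., 0.666...]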
...
...
@@ -194,13 +170,14 @@ def Context_Bcalibration(input_dict):
def Context_Acalibration(input_dict):
    from numpy import inf
    import math
    dict = input_dict['test_scores']
    sc = input_dict['builded_scores']
    X1 = sc['calibrated_scr']
    print len(X1)
    print dict['predicted']
    #print len(X1)
    #print dict['predicted']
    k = 0
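For orientation, the data handed from Context_Bcalibration to Context_Acalibration appears to use the keys visible in this diff; a made-up example of that shape (the values are purely illustrative) would be:

# Illustrative input shape only; values are invented for the example.
builded_scores = {
    'noncalibrated_scr': [0.15, 0.40, 0.55, 0.90],  # raw scores after sorting
    'class':             [0, 1, 0, 1],              # labels in the same order
    'calibrated_scr':    [0.0, 0.5, 0.5, 1.0],      # isotonic (non-decreasing) values
}
test_scores = {
    'actual':    [0, 1, 1],
    'predicted': [0.20, 0.60, 0.85],
}
input_dict = {'builded_scores': builded_scores, 'test_scores': test_scores}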
...
...
@@ -233,24 +210,16 @@ def Context_Acalibration(input_dict):
            list_max.append(j)
            max = j
    print "what's up man!"
    print begin_score
    print end_score
    print list_max
    for scr in dict['predicted']:
        k += 1
        scr2 = 0
        print scr
        for i in range(len(begin_score)):
            if (scr >= begin_score[i] and scr <= end_score[i]) or (scr == begin_score[i]) or (scr == end_score[i]):
                scr2 = list_max[i]
                if scr2 == 0:
                    scr2 = 0.1
                elif scr2 == 1:
                    scr2 = 0.98
            elif scr > end_score[i] and scr < begin_score[i + 1]:
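The lookup above assigns each test score the calibrated value of the bin it falls into, and the next hunk interpolates between bins and clamps exact 0s and 1s (to 0.1 and 0.98), presumably so the later log-odds step stays finite. A self-contained sketch of that mapping, with an explicit interpolation factor standing in for the widget's val1, might look like:

# Illustrative sketch of mapping a raw score onto per-bin calibrated values.
def calibrate_score(scr, begin_score, end_score, list_max):
    """begin_score/end_score delimit score bins; list_max holds the
    calibrated value attached to each bin (names follow the widget code)."""
    scr2 = 0
    for i in range(len(begin_score)):
        if begin_score[i] <= scr <= end_score[i]:
            scr2 = list_max[i]
        elif i + 1 < len(begin_score) and end_score[i] < scr < begin_score[i + 1]:
            # between two bins: interpolate linearly between their values
            span = begin_score[i + 1] - end_score[i]
            frac = (scr - end_score[i]) / float(span)
            scr2 = list_max[i] + (list_max[i + 1] - list_max[i]) * frac
    # keep the result strictly inside (0, 1) for the later log-odds step
    if scr2 == 0:
        scr2 = 0.1
    elif scr2 == 1:
        scr2 = 0.98
    return scr2

print(calibrate_score(0.45, [0.0, 0.5], [0.4, 1.0], [0.2, 0.8]))  # 0.5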
...
...
@@ -258,35 +227,29 @@ def Context_Acalibration(input_dict):
                scr2 = list_max[i] + ((list_max[i + 1] - list_max[i]) * val1)
                if scr2 == 0:
                    scr2 = 0.1
                elif scr2 == 1.0:
                    scr2 = 0.98
        list_scr.append(scr2)
    print end_score[len(end_score) - 1]
    print len(list_scr)
    print list_scr
    for j in list_scr:
        print j
        probs.append(math.log10(j / (1 - j)))
        print len(test_cls)
        #print len(probs)
        #print probs
        #print j
        if j == 0:
            probs.append(float(-inf))
        elif j == 1:
            probs.append(float(+inf))
        else:
            probs.append(math.log10(j / (1 - j)))
    #print len(test_cls)
    output_dict = {}
    output_dict['calibrated_scores'] = {'actual': test_cls, 'predicted': probs}
    return output_dict
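The final loop turns each calibrated probability into a base-10 log-odds score, sending 0 and 1 to -inf and +inf. In isolation that conversion is just:

# Illustrative sketch of the probability -> log10-odds conversion above.
import math

def to_log_odds(p):
    if p == 0:
        return float('-inf')
    if p == 1:
        return float('+inf')
    return math.log10(p / (1 - p))

print([to_log_odds(p) for p in (0.1, 0.5, 0.98)])
# [-0.954..., 0.0, 1.690...]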
def search_for_position(list):
    print len(list)
    l1 = list
    l2 = l1
    print l2
    #print l2
    new_dict = {}
    new_dict['actual'] = l2
    for i in range(1, len(l1)):
        #print 'etape n%s '%i
        print 'etape n%s ' % i
        k = 2
        list2 = []
        list3 = []
...
...
@@ -296,7 +259,6 @@ def search_for_position(list):
        j = 1
        if l1[i] < l1[i - 1]:
            j += 1
            #print 'hello n 1'
            print j
            list2.append(l1[i])
            list2.append(l1[i - 1])
...
...
@@ -305,42 +267,31 @@ def search_for_position(list):
        for n in range(2, i):
            if l1[i - 1] == l1[i - n] and var == True and i != 1:
                j += 1
                #print 'here%f'%j
                list2.append(l1[i - n])
                list3.append(i - n)
                aux = False
            else:
                var = False
        if aux == False:
            #print j
            #print 'hello n 2'
            c = sum(list2)
            #print c
            #print list3
            for z in list3:
                print z
                l1[z] = float(c) / j
            print l1
            c = sum(list2)
            #print c
            #print list3
            for z in list3:
                print z
                l1[z] = float(c) / j
        else:
            print list2
            print j
            print 'hello n 3'
            #list[i-1]=list[i]=(list[i]+list[i-1]/j)
            x = ((float(l1[i] + l1[i - 1])) / 2)
            l1[i] = l1[i - 1] = x
            print j
            print x
            print l1
            list4 = search_for_position(l1)
            print "hi ! i am list n4"
            print list4
            #print list
            print new_dict
            print list2
            #list[i-1]=list[i]=(list[i]+list[i-1]/j)
            x = ((float(l1[i] + l1[i - 1])) / 2)
            l1[i] = l1[i - 1] = x
            print j
            print x
            print new_dict
    return l1

def sort_list2(list1, list2):
    n = len(list1)
    #print n
    Dict = {}
    for i in range(n - 1):
        #print i
...
...
@@ -360,7 +311,10 @@ def sort_list2(list1,list2):
            aux2 = list2[i]
            list2[i] = list2[pos]
            list2[pos] = aux2
    print list1
    #print list1
    Dict = {'actual': list2, 'predicted': list1}
    return Dict
\ No newline at end of file

def non_decreasing(L):
    return all(x <= y for x, y in zip(L, L[1:]))
\ No newline at end of file
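sort_list2 reorders the predicted scores in place and carries the actual labels along with the same swaps. Purely as an illustration, the same pairing can be done by zipping the two lists and sorting once (ascending here; the widget's comparison direction is in a part of the loop this diff does not show):

# Illustrative alternative to sort_list2: sort both lists together by score.
def sort_pairs(predicted, actual):
    pairs = sorted(zip(predicted, actual), key=lambda p: p[0])
    preds_sorted = [p for p, a in pairs]
    actual_sorted = [a for p, a in pairs]
    return {'actual': actual_sorted, 'predicted': preds_sorted}

print(sort_pairs([0.7, 0.1, 0.4], [1, 0, 1]))
# {'actual': [0, 1, 1], 'predicted': [0.1, 0.4, 0.7]}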