# -*- coding: utf-8 -*-
import math
import csv_io
import numpy as np
from sklearn.cross_validation import train_test_split
import sample
import matplotlib.pyplot as plt

def shannon_entropy(dataset):
    """Binary Shannon entropy (in bits) of the last column of `dataset`.

    The last column is treated as a binary label: non-zero counts as
    positive, zero as negative.

    Args:
        dataset: 2-D numpy array whose last column holds the labels.

    Returns:
        Entropy in bits; 0 for an empty dataset or a pure (single-class)
        split.
    """
    num = dataset.shape[0]
    if num == 0:
        # An empty split carries no information (the original divided by
        # num before its zero check and would raise ZeroDivisionError).
        return 0
    positive = np.count_nonzero(dataset[:, -1])
    negative = num - positive
    if positive == 0 or negative == 0:
        # Pure split: entropy is zero; also avoids log(0) below.
        return 0
    pos_prob = positive * 1.0 / num
    neg_prob = negative * 1.0 / num
    return -pos_prob * math.log(pos_prob, 2) - neg_prob * math.log(neg_prob, 2)

def count_equal(dataset, value=0, column=-1):
    """Count the rows of `dataset` whose `column`-th entry equals `value`.

    Args:
        dataset: iterable of indexable rows.
        value: value to compare against (default 0).
        column: index into each row (default -1, the last entry).

    Returns:
        Number of matching rows.
    """
    return sum(1 for row in dataset if row[column] == value)

def check(pro):
    pro.sort()
    if   pro[1] < 0.4:
        return True
    return False


def get_lines(dataset, feat):
    """Pick two split points on feature column `feat` minimising the 3-way
    weighted Shannon entropy, then score the three resulting bands.

    Assumes `dataset` is a numpy array already sorted ascending by column
    `feat` (the caller, get_weight, sorts before calling) and that the
    last column is a binary label where 0 means "bad".

    Returns:
        (left_bound, right_bound, left_score, middle_score, right_score)
        where the bounds are feature-value midpoints delimiting the three
        bands and the scores are each band's (1 - bad rate) normalised by
        the best band's value.
    """
    # Candidate split indices: every row where the feature value changes.
    bound = []
    size = len(dataset)
    for i in range(1, size - 1):
        if dataset[i-1][feat] == dataset[i][feat]:
            continue
        else:
            bound.append(i)
    print "get_lines feat {0} len bound {1}".format(feat, len(bound))
    # enumerate pair of values in bound
    # find the smallest entropy
    best_left = -1
    best_right = -1
    best = None
    if len(bound) >= 2:
        # Exhaustive O(|bound|^2) search over ordered index pairs (i, j);
        # the candidate entropy is the size-weighted sum over the three
        # slices [0:i), [i:j), [j:size).
        for _i, i in enumerate(bound):
            for _j, j in enumerate(bound[_i+1 :]):
                entropy = 0
                # part 1
                entropy += float(i * 1.0 / size) * shannon_entropy(dataset[:i])
                # part 2
                entropy += float((j-i) * 1.0 / size) * shannon_entropy(dataset[i:j])
                # part 3
                entropy += float((size - j) * 1.0 /size) * shannon_entropy(dataset[j:])
                #print "entropy ... {0}, {1}, {2}".format(i, j, entropy)
                # proportition = [0, 0, 0]
                # proportition[0] = np.count_nonzero(dataset[:i, -1]==0) * 1.0 / i
                # proportition[1] = np.count_nonzero(dataset[i:j, -1]==0) * 1.0 / (j - i)
                # proportition[2] = np.count_nonzero(dataset[j:, -1]==0) * 1.0 / (size - j)
                # x = sum(proportition) * 1.0
                # proportition[0] /= x
                # proportition[1] /= x
                # # proportition[2] /= x
                # if not best or check(proportition):
                #     best = proportition
                #     best_left = i
                #     best_right = j
                #     break
                # NOTE(review): `not best` is also True when best == 0, so
                # a zero-entropy optimum may be overwritten by a later
                # zero-entropy pair; result quality is unaffected.
                if not best or best > entropy:
                    best = entropy
                    best_left = i
                    best_right = j
            # if not best:
            #     break
    else:
        # Fewer than two change points: fall back to thirds of the single
        # change index.  NOTE(review): if `bound` is empty (constant
        # feature column) this raises IndexError -- confirm callers
        # guarantee at least one change point.  Under Python 2, `/` here
        # is integer division on int indices (intended).
        best_left = bound[0] / 3
        best_right = bound[0] * 2 / 3

    # Bounds are midpoints between the feature values either side of each
    # split index.  NOTE(review): with integer feature values this is py2
    # integer division; verify whether truncation is acceptable.
    left_bound = (dataset[best_left, feat] + dataset[best_left - 1, feat]) / 2
    right_bound = (dataset[best_right, feat] + dataset[best_right - 1, feat]) / 2

    # calculate score: per-band fraction of "bad" rows (label == 0)
    # total_bad = np.count_nonzero(dataset[:, -1] == 0)
    left_bad = np.count_nonzero(dataset[:best_left, -1] == 0) * 1.0 / best_left
    middle_bad = np.count_nonzero(dataset[best_left:best_right, -1] == 0) * 1.0 / (best_right - best_left)
    right_bad = np.count_nonzero(dataset[best_right:, -1] == 0) * 1.0 / (size - best_right)
    # total_good = np.count_nonzero(dataset[:-1])
    # left_good = best_left - left_bad
    # middle_good = best_right - best_left - middle_bad
    # right_good = size - best_right - right_bad
    #
    # if left_bad != 0 and left_good != 0:
    #     left_score = math.log(left_good * 1.0 / total_good / (left_bad * 1.0 / total_bad))
    #     left_score = 1 / (1 + math.exp(-left_score))
    # elif left_bad == 0:
    #     left_score = 1
    # else:
    #     left_score = 0
    #
    # if middle_bad != 0 and middle_good != 0:
    #     middle_score = math.log( (middle_good * 1.0 / total_good) / (middle_bad * 1.0 / total_bad))
    #     middle_score = 1 / (1 + math.exp(-middle_score))
    # elif left_bad == 0:
    #     middle_score = 1
    # else:
    #     middle_score = 0
    # if right_bad != 0 and right_good != 0:
    #     right_score = (right_good * 1.0 / total_good) /  (right_bad * 1.0 / total_bad)
    #     right_score = 1 / (1 + math.exp(-right_score))
    # elif right_bad == 0:
    #     right_score = 1
    # else:
    #     right_score = 0
    # score = max(left_score, max(middle_score, right_score))
    # left_score /= score
    # middle_score /= score
    # right_score /= score

    # Normaliser: the best band's (1 - bad rate); best band scores 1.0.
    score = 1 - min(left_bad, min(middle_bad, right_bad))

    # print middle_bad

    left_score = (1 - left_bad * 1.0)  / score
    middle_score = (1 - middle_bad * 1.0) / score
    right_score = (1 - right_bad * 1.0 ) / score

    return left_bound, right_bound, left_score, middle_score, right_score

def get_weight(dataset, feature_weight):
    """Build per-feature interval weights from the labelled matrix.

    For each feature whose global weight exceeds 0.001: sort rows by that
    feature, find the two best split bounds via get_lines, and scale the
    three band scores by the feature's weight.

    Args:
        dataset: numpy array; last column is the binary label.
        feature_weight: sequence of [name, desc, weight] rows (see parse_w).

    Returns:
        dict mapping feature index -> [left_bound, right_bound,
        left_score, middle_score, right_score].
    """
    weight={}

    for i in range(len(feature_weight)):
        if feature_weight[i][2] > 0.001:
            # get_lines assumes ascending order in column i, so re-sort
            # the rows before each call.  The local name `dataset` is
            # rebound (caller's array is not reordered); the sorted array
            # carries over to the next iteration, which is harmless since
            # each significant feature re-sorts from scratch.
            indices = np.argsort(dataset[:, i])
            dataset = dataset[indices]
            left_bound, right_bound, left_score, middle_score, right_score = get_lines(dataset, i)
            print left_bound, right_bound, left_score, middle_score, right_score
            left_score *= feature_weight[i][2]
            middle_score *= feature_weight[i][2]
            right_score *= feature_weight[i][2]
            weight.setdefault(i, [left_bound, right_bound, left_score, middle_score, right_score])
        else:
            # Feature too weak to contribute: zero bounds and zero scores.
            weight.setdefault(i, [0, 0, 0, 0, 0])
    return weight

def score_one(entry, weight):
    """Score one feature vector against per-feature interval weights.

    Args:
        entry: sequence of feature values.
        weight: dict mapping feature index -> [left_bound, right_bound,
            left_score, middle_score, right_score]; each value picks the
            score of whichever of the three intervals it falls into.

    Returns:
        Sum of the per-feature interval scores.
    """
    assert type(weight) is dict
    total = 0
    for idx, value in enumerate(entry):
        bands = weight.get(idx, [0, 0, 0, 0, 0])
        if value < bands[0]:
            total += bands[2]
        elif value < bands[1]:
            total += bands[3]
        else:
            total += bands[4]
    return total

def credit_score(dataset, weight):
    """Compute a (user_id, score) pair for every row of `dataset`.

    Column 0 of each row is the user id; columns 1..-2 are the features
    scored by score_one.  The explicit-score term is currently disabled
    and contributes 0.
    """
    scores = []
    for row in dataset:
        implicit_part = score_one(row[1:-1], weight)
        # explicit_score = 100 * (1 / (1 + math.exp(row[-2]))) + 100 * (1 / (1 + math.exp(row[-3])))
        explicit_part = 0
        scores.append((row[0], implicit_part + explicit_part))
    return scores

def parse_w(line):
    """Parse one feature.w CSV row: keep the first two fields verbatim and
    convert the stripped third field to float."""
    name, desc = line[0], line[1]
    return [name, desc, float(line[2].strip())]

def evaluation(user_score, dataset, weight):
    """Rank users by score, report the "bad" (label 0) rate among the top-n
    users, dump the (rank, bad-count) curve to disk, and plot it.

    Args:
        user_score: list of (user_id, score) pairs, aligned row-for-row
            with `dataset` (labels are matched by position).
        dataset: numpy array whose last column is the label (0 = bad).
        weight: unused here; kept for signature compatibility.
    """

    # Map user id -> label, relying on user_score[i] describing dataset
    # row i.  NOTE(review): setdefault keeps the first label seen for a
    # duplicated user id -- confirm ids are unique.
    label = dict()
    for i, item in enumerate(user_score):
        label.setdefault(item[0], dataset[i, -1])
    # Rank users by score, highest first.
    tmp = sorted(user_score, key=lambda d: d[1], reverse=True)
    # count[n] = number of bad users among the top n ranked users.
    count = {}
    count.setdefault(0, 0)
    for i, item in enumerate(tmp):
        count.setdefault(i + 1, count[i])
        user = item[0]
        if label[user] == 0:
            count[i + 1] = count[i] + 1

    # NOTE(review): the hard-coded 20001 assumes at least 20000 users; a
    # smaller dataset raises KeyError here -- confirm dataset size.
    for i in range(100, 20001, 100):
        print "{0}, bad number {1}, bad rate {2}".format(i, count[i], count[i] * 1.0 / i)

    # Sample the cumulative bad-count curve every 50 ranks.
    x = range(1, len(user_score), 50)
    y = [count[i] * 1.0 for i in x]

    # Persist the (rank, bad-count) points for external analysis.
    with open("../../data/feat/number.txt", "wb") as f:
        for i, v in enumerate(x):
            f.write("{0},{1}\n".format(v, y[i]))
    plt.plot(x, y)
    plt.xlabel("rank")
    plt.ylabel("negative sample rate")
    plt.title("relation between rank n and negative sample rate")
    plt.show()

def process():
    """End-to-end scoring pipeline.

    Loads feature weights and the feature matrix, derives a binary label
    from the two violation columns, learns per-feature interval weights on
    a down-sampled training half, then scores every user and evaluates the
    ranking.  All paths are relative to ../../data/feat/try1/.
    """
    # reading feature.w
    # f_feature_w = "../../data/feat/try1/feature.w"
    f_feature_w = "../../data/feat/try1/feature.w"
    feature_w = csv_io.read_csv(f_feature_w, parse=parse_w, header=False)

    # scale x100: renormalise the significant weights (> 0.001) so they
    # sum to ~100, truncated to ints.
    total = 0
    for item in feature_w:
        if item[2] > 0.001:
            total += item[2]

    for i, item in enumerate(feature_w):
        if item[2] > 0.001:
            feature_w[i][2] = int(item[2] / total * 100)

    # load dataset
    dataset = csv_io.load_csv('../../data/feat/try1/feature.csv', parse=csv_io.parse_feature, header=True)

    # Drop the header row, then convert to a numpy array.
    data = dataset[1:]

    data = np.array(data)

    # Column 0 is the user id; the last two columns are violation counts.
    feature, violation = data[:, 1:-2], data[:, -2:]

    violation = np.sum(violation, 1)

    # Label: 1 = clean user, 0 = any violation ("bad").
    negative = violation != 0

    label = np.ones(violation.shape)

    label[negative] = 0

    x_train, x_test, y_train, y_test = train_test_split(feature, label, test_size=0.5)

    # Balance the training half (down-sample majority class at ratio 1.0;
    # see sample.down_sample).
    x_train, y_train = sample.down_sample(x_train, y_train, 1.0)

    # Pack features and label into one matrix: get_weight expects the
    # label in the last column.
    X = np.zeros((x_train.shape[0], x_train.shape[1] + 1))

    X[:, :-1] = x_train

    X[:, -1] = y_train

    print X.shape

    weight = get_weight(X, feature_w)

    print "weight "

    f_weight="../../data/feat/try1/weight.txt"

    csv_io.write_csv(f_weight, weight.items())

    # Re-pack the FULL dataset as [user_id, features..., label] and score
    # every user (training and test rows alike).
    X = np.zeros((data.shape[0], x_train.shape[1] + 2))
    X[:, 0] = data[:, 0]
    X[:, 1:-1] = data[:, 1:-2]
    X[:, -1] = label

    user_score = credit_score(X, weight)

    evaluation(user_score, X, weight)

    f_score = "../../data/feat/try1/t_score.txt"

    csv_io.write_csv(f_score, user_score, header="user,score")

# Script entry point.
if __name__ == '__main__':
    process()