import random
import mmh3
import numpy as np
from collections import Counter

class Data(object):
    """Simple container pairing positive and negative example sets.

    Attributes:
        positives: iterable of positive (label 1) samples.
        negatives: iterable of negative (label 0) samples.
    """

    def __init__(self, positives, negatives):
        # Store both sample collections exactly as supplied by the caller.
        self.positives, self.negatives = positives, negatives

def shuffle_for_training(negatives, positives):
    """Label and shuffle the two sample sets for training.

    Negatives are tagged 0 and positives 1, the labeled pairs are shuffled
    in place, and the result is transposed into two parallel sequences.

    Returns:
        [samples_tuple, labels_tuple], or [] when both inputs are empty.
    """
    labeled = [(sample, 0) for sample in negatives]
    labeled.extend((sample, 1) for sample in positives)
    random.shuffle(labeled)
    # Transpose the (sample, label) pairs into (samples, labels).
    return list(zip(*labeled))

def string_digest(item, index):
    """Return the 32-bit murmur3 hash of `item`, seeded with `index`."""
    encoded = item.encode('utf-8')
    return mmh3.hash(encoded, index)


def split_negatives(data, train_portion=0.9):
    """Split `data.negatives` into train and holdout portions.

    Args:
        data: object with a `negatives` sequence attribute.
        train_portion: fraction (0..1) of negatives placed in the first split.

    Returns:
        (train_split, holdout_split) — two non-overlapping slices covering
        all of `data.negatives`.
    """
    cut = int(train_portion * len(data.negatives))
    return data.negatives[:cut], data.negatives[cut:]

def vectorize_dataset(text_X, text_y, maxlen):
    """Encode URLs as fixed-length integer sequences with boolean labels.

    Each URL is mapped character-by-character through a vocabulary built
    from the whole corpus. Index 0 is reserved for padding; real characters
    get indices 1..len(chars). Short URLs are left-padded (right-aligned),
    long ones are truncated to `maxlen`.

    Args:
        text_X: sequence of URL strings.
        text_y: parallel sequence of 0/1 labels.
        maxlen: fixed output sequence length.

    Returns:
        (X, y, char_indices, indices_char) where X is an int array of shape
        (len(text_X), maxlen), y is a bool array of shape (len(text_X),),
        and the two dicts map char -> index and index -> char.
    """
    # Adapted from Keras examples
    print("Vectorizing data...")
    # Concatenate all URLs into one string to collect the character vocabulary.
    raw_text = ''.join(text_X)
    print("Corpus length", len(raw_text))
    # All characters seen in the corpus, in ascending order.
    chars = sorted(set(raw_text))
    print(chars)
    print('Total chars:', len(chars))

    # URL-length distribution — printed for diagnostics only.
    lengths = [len(url) for url in text_X]
    counter = Counter(lengths)
    counts = sorted(counter.items())
    print(counts)
    # Reuse the precomputed lengths instead of re-scanning every URL.
    max_seen = max(lengths, default=0)
    print("max seen length of URL", max_seen)
    print("Using maxlen", maxlen)

    # Map each character to an index starting at 1 ('-' -> 1, '.' -> 2, ...);
    # 0 is reserved for the padding slot.
    char_indices = dict((c, i + 1) for i, c in enumerate(chars))
    # Inverse mapping: index back to character.
    indices_char = dict((i + 1, c) for i, c in enumerate(chars))

    # 0 in this indicates empty word, 1 through len(chars) inclusive
    # indicates a particular char.
    # NOTE: np.int / np.bool were deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin types select the same default dtypes.
    X = np.zeros((len(text_X), maxlen), dtype=int)
    y = np.zeros((len(text_X)), dtype=bool)
    for i, url in enumerate(text_X):
        # Left-pad: how far to shift this URL so it is right-aligned.
        offset = max(maxlen - len(url), 0)
        for t, char in enumerate(url):
            if t >= maxlen:
                # Truncate URLs longer than maxlen.
                break
            X[i, t + offset] = char_indices[char]
        y[i] = 1 if text_y[i] == 1 else 0

    # X: row i holds URL i, right-aligned; each filled cell is the
    #    char_indices value of the corresponding character.
    # y: the 0/1 labels converted to False/True.
    return X, y, char_indices, indices_char

def test_model(model, text_X, text_y):
    total = float(len(text_X))
    total_correct = 0.0
    false_positives = 0.0
    false_negatives = 0.0
    for i, url in enumerate(text_X):
        raw_pred = model.predict(url)
        pred = 1 if raw_pred > 0.5 else 0
        label = text_y[i]
        if pred == label:
            total_correct += 1
        else:
            if pred == 1:
                false_positives += 1
            else: 
                false_negatives += 1
    return total_correct / total, false_positives / total, false_negatives / total


def evaluate_model(model, positives, negatives_train, negatives_dev, negatives_test, threshold):
    false_negatives = 0.0
    preds = model.predicts(positives)
    for pred in preds:
        if pred <= threshold:
            false_negatives += 1

    print(false_negatives / len(positives), "false negatives for positives set.")

    false_positives_train = 0.0
    preds = model.predicts(negatives_train)
    for pred in preds:
        if pred > threshold:
         false_positives_train += 1

    false_positives_dev = 0.0
    preds = model.predicts(negatives_dev)
    for pred in preds:
        if pred > threshold:
         false_positives_dev += 1

    false_positives_test = 0.0
    preds = model.predicts(negatives_test)
    for pred in preds:
        if pred > threshold:
         false_positives_test += 1

    print(false_positives_train / len(negatives_train), "false positive rate for train.")
    print(false_positives_dev / len(negatives_dev), "false positive rate for dev.")
    print(false_positives_test / len(negatives_test), "false positive rate for test.")
