import sys
import copy
import torch
import random
import numpy as np
import pandas as pd
from collections import defaultdict
from multiprocessing import Process, Queue
from torch.utils.data import DataLoader,WeightedRandomSampler
from sklearn.model_selection import StratifiedShuffleSplit
from woe_bin import woe_bin

from imblearn.over_sampling import SMOTE
from ast import literal_eval
from sklearn import metrics

# sampler for batch generation
def random_neq(l, r, s):
    """Draw a uniform random int in [l, r) that is not a member of s."""
    while True:
        candidate = np.random.randint(l, r)
        if candidate not in s:
            return candidate


def sample_function(user_train, usernum, batch_size, result_queue, SEED):
    """Worker loop: endlessly draws stratified mini-batches from user_train
    and pushes them onto result_queue.

    Parameters
    ----------
    user_train : np.ndarray
        2-D array; column 0 is the label, columns 1: are the features
        (inferred from the indexing below -- confirm with caller).
    usernum : int
        Number of rows in user_train.
    batch_size : int
        Approximate rows per batch; batch_size / usernum is used as the
        stratified train_size fraction.
    result_queue : multiprocessing.Queue
        Receives one (features_tuple, labels_tuple) pair per batch.
    SEED : int
        Per-worker numpy RNG seed, so workers don't produce identical draws.
    """
    np.random.seed(SEED)
    while True:
        # Fresh splitter each round: every batch is an independent
        # label-stratified draw of ~batch_size rows.
        split = StratifiedShuffleSplit(n_splits=1, train_size=batch_size / usernum)
        one_batch = []

        for batch_index, _ in split.split(user_train[:, 1:], user_train[:, 0]):
            for row in user_train[batch_index]:
                one_batch.append((row[1:], row[0]))

        # Materialize before enqueueing: multiprocessing.Queue pickles its
        # items, and a bare zip iterator is not picklable in Python 3.
        result_queue.put(tuple(zip(*one_batch)))


class WarpSampler(object):
    """Keeps a bounded queue of ready-made batches filled by daemon workers."""

    def __init__(self, User, batch_size=64,  n_workers=1):
        usernum = User.shape[0]
        # Cap the backlog so workers block instead of running ahead forever.
        self.result_queue = Queue(maxsize=n_workers * 10)
        self.processors = []
        for _ in range(n_workers):
            seed = np.random.randint(2e9)
            worker = Process(
                target=sample_function,
                args=(User, usernum, batch_size, self.result_queue, seed),
            )
            worker.daemon = True  # don't outlive the parent process
            worker.start()
            self.processors.append(worker)

    def next_batch(self):
        """Block until a batch is available, then return it."""
        return self.result_queue.get()

    def close(self):
        """Terminate every worker and wait for it to exit."""
        for worker in self.processors:
            worker.terminate()
            worker.join()

# Data partitioning
def data_partition(train,test):
    """Load the train and test CSVs (column 0 is the index) as value arrays."""
    frames = [pd.read_csv(path, index_col=0) for path in (train, test)]
    return [frame.values for frame in frames]


def evaluate(model, dataset, args):
    """Score `model` on `dataset` and return (auc, acc, recall, ap, f1).

    Parameters
    ----------
    model : object whose .predict() returns a torch tensor of scores
    dataset : np.ndarray
        Column 0 is the ground-truth label, columns 1: are the features.
    args : unused here; kept for call-site compatibility.
    """
    reality = dataset[:,0]
    # Detach from the autograd graph and move to host memory before numpy/sklearn.
    predictions = model.predict(dataset[:, 1:]).cpu().detach().numpy() # np array
    # Hard 0/1 labels for the threshold-based metrics.
    predicted_labels = np.round(predictions)

    # Ranking metrics must see the raw scores: rounding first collapses the
    # ranking and makes average precision / AUC meaningless.
    auc = metrics.roc_auc_score(reality, predictions)
    ap = metrics.average_precision_score(reality, predictions)

    acc = metrics.accuracy_score(reality, predicted_labels)
    recall = metrics.recall_score(reality, predicted_labels)
    f1 = metrics.f1_score(reality, predicted_labels)
    return auc, acc, recall, ap, f1

def seq_sum(a, dev, weight):
    """Prefix-sum `a` along its sequence dimension (dim 1).

    Returns sum_a where sum_a[:, t, :] == a[:, :t+1, :].sum(dim=1), built by
    repeatedly right-shifting `a` (zero-padded on the left) and accumulating.

    Parameters
    ----------
    a : torch.Tensor, shape (batch, seq_len, dim)
        (inferred from the three-index slicing below -- confirm with caller)
    dev : torch device for the accumulator and padding.
    weight : unused -- NOTE(review): dead parameter, kept only so existing
        call sites keep working.

    Bug fix: the accumulator was allocated as (batch, seq_len), which cannot
    absorb the (batch, seq_len, dim) in-place adds (broadcast error on every
    call); it now matches `a`'s full shape.
    """
    sum_a = torch.zeros(a.shape[0], a.shape[1], a.shape[2]).to(dev)
    for _ in range(a.shape[1]):
        sum_a += a
        # Shift right along the sequence axis: drop the last step and pad a
        # zero step at the front.
        a = a[:, 0:a.shape[1]-1, :]
        a = torch.cat((torch.zeros(a.shape[0], 1, a.shape[2]).to(dev), a), dim=1).to(dev)
    return sum_a

# square deviation
def calculate_distance_square(x, y):
    """Return the squared Euclidean distance between x and y along the last axis."""
    diff = x - y
    return (diff * diff).sum(dim=-1)

