# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import random
import csv
import torch
from sklearn.model_selection import train_test_split
from config import cfg

def get_user_id_list(user_id_file_path):
    """Read a CSV file and return the unique values of its first column as ints.

    First-occurrence order is preserved. Rows are read with the csv module,
    so quoted fields containing commas are handled correctly.

    Args:
        user_id_file_path: path to a comma-separated file whose first column
            holds user ids (assumed parseable as integers — TODO confirm).

    Returns:
        1-D numpy array of unique user ids, dtype int.
    """
    print("get user id list ...")
    with open(user_id_file_path, 'r') as user_id_file:
        user_id_data_rows = csv.reader(user_id_file, delimiter=',', lineterminator='\n')
        # dict.fromkeys dedups in O(n) while keeping first-seen order
        # (the original list membership test was O(n^2)).
        unique_ids = list(dict.fromkeys(row[0] for row in user_id_data_rows))
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the supported spelling.
    return np.array(unique_ids).astype(int)

def sliding_window(x_data, y_data, window_size, step_size):
    """Cut each sequence in x_data into fixed-size overlapping windows.

    Every window inherits the label of the sequence it was cut from, so the
    two returned arrays stay aligned sample-for-sample.

    Args:
        x_data: iterable of sequences (each indexable and sized).
        y_data: labels, one per sequence in x_data.
        window_size: length of each window.
        step_size: stride between consecutive window starts.

    Returns:
        (windows, labels) as a pair of numpy arrays.
    """
    windows = []
    labels = []
    for seq_idx, sequence in enumerate(x_data):
        last_start = len(sequence) - window_size
        for start in range(0, last_start + 1, step_size):
            windows.append(sequence[start:start + window_size])
            labels.append(y_data[seq_idx])
    return np.array(windows), np.array(labels)


def get_positive_data(positive_label, x_data_filepath, y_data_filepath):
    """Load the feature rows whose label equals `positive_label`.

    Reads the label file with pandas to find the matching row indices, then
    streams the (potentially large) feature file line by line, keeping only
    the matching rows. The kept rows are shuffled; labels are all ones, so
    they need no matching permutation.

    Args:
        positive_label: user id treated as the positive class.
        x_data_filepath: CSV of feature rows, one sample per line.
        y_data_filepath: single-column CSV of labels, aligned with x rows.

    Returns:
        (x_data, y_data): shuffled float feature matrix and an all-ones
        int label vector of the same length.
    """
    print('  get dataset for positive user:' + str(positive_label))
    # Read y_data.csv (single label column, no header).
    y_df = pd.read_csv(y_data_filepath, header=None)
    # Row indices of the positive user; a set gives O(1) membership per line.
    positive_idx = set(y_df[y_df[0] == positive_label].index)
    x_data = []
    # `np.int`/`np.float` were removed in NumPy 1.24; use the builtins.
    y_data = np.ones(len(positive_idx)).astype(int)
    with open(x_data_filepath, 'r') as f:
        for i, line in enumerate(f):
            if i in positive_idx:
                sample = np.array(line.split(',')).astype(float)
                x_data.append(sample)
    x_data = np.array(x_data)
    # Shuffle the positive samples (labels are identical, so only x moves).
    permutation = np.random.permutation(len(x_data))
    x_data = x_data[permutation]
    return x_data, y_data


def consolidate_dataset(positive_data, negative_data, positive_labels, negative_labels, data_row_size, step, word_dict_size=1000) -> tuple:
    """Build balanced, shuffled train/test splits from positive and negative samples.

    The positive set (assumed smaller — TODO confirm) is windowed without
    shuffling, then repeated to match the negative set's size before the two
    classes are concatenated and shuffled together.

    Args:
        positive_data / negative_data: raw sample matrices, one row per sample.
        positive_labels / negative_labels: labels aligned with the rows above.
        data_row_size: sliding-window length passed to get_train_test.
        step: sliding-window stride.
        word_dict_size: vocabulary size used by normalization for Embedding nets.

    Returns:
        (x_train, x_test, y_train, y_test)
    """
    # Oversampling factor: how many times the negatives outnumber the positives.
    ratio = int(negative_data.shape[0] / positive_data.shape[0])

    pos_x_tr, pos_x_te, pos_y_tr, pos_y_te = get_train_test(
        positive_data, positive_labels, data_row_size, step, word_dict_size, shuffle=False)

    # Balance the classes by repeating the rarer positive windows.
    pos_x_tr, pos_y_tr = repeat_samples(pos_x_tr, pos_y_tr, ratio)
    pos_x_te, pos_y_te = repeat_samples(pos_x_te, pos_y_te, ratio)

    neg_x_tr, neg_x_te, neg_y_tr, neg_y_te = get_train_test(
        negative_data, negative_labels, data_row_size, step, word_dict_size)

    # NOTE(review): `random` here is the shuffle helper defined later in this
    # module, which shadows the stdlib `random` imported at the top of the file.
    x_train, y_train = random(np.concatenate((pos_x_tr, neg_x_tr)),
                              np.concatenate((pos_y_tr, neg_y_tr)))
    x_test, y_test = random(np.concatenate((pos_x_te, neg_x_te)),
                            np.concatenate((pos_y_te, neg_y_te)))

    return x_train, x_test, y_train, y_test


def get_train_test(x_data, y_data, data_row_size, step_size, word_dict_size, shuffle=True):
    """Window, normalize, and split samples into train/test sets.

    Args:
        x_data, y_data: raw sequences and their labels.
        data_row_size: sliding-window length.
        step_size: sliding-window stride.
        word_dict_size: vocabulary size forwarded to normalizeData.
        shuffle: whether train_test_split shuffles before splitting.

    Returns:
        (x_train, x_test, y_train, y_test); split fraction comes from
        cfg.test_size, with a fixed random_state for reproducibility.
    """
    windows, labels = sliding_window(x_data, y_data, data_row_size, step_size=step_size)
    windows = normalizeData(windows, word_dict_size)
    x_train, x_test, y_train, y_test = train_test_split(
        windows, labels, test_size=cfg.test_size, shuffle=shuffle, random_state=42)
    return x_train, x_test, y_train, y_test


def random(samples, labels):
    """Shuffle samples and labels in unison along axis 0.

    NOTE(review): this function shadows the stdlib `random` module imported
    at the top of the file; callers in this module rely on the current name.

    Returns:
        (samples, labels) reordered by one shared random permutation.
    """
    # np.random.permutation(n) is arange(n) + shuffle in one call.
    order = np.random.permutation(samples.shape[0])
    return samples[order], labels[order]

def normalizeData(x, n):
    """Normalize x according to cfg, optionally rescaling for Embedding nets.

    Uses per-sample (layer) normalization when cfg.layer_norm is set,
    per-feature (batch) normalization otherwise. For networks whose name
    contains "Embedding", the result is squashed through a sigmoid and
    scaled into (0, n) so values can serve as embedding indices.
    """
    normalized = layer_normalize(x) if cfg.layer_norm else batch_normalize(x)
    if "Embedding" not in cfg.net:
        return normalized
    # Map to the open interval (0, n) for embedding lookup.
    return sigmoid(normalized) * n

def batch_normalize(x):
    """Standardize each feature (column) to zero mean and unit variance.

    Fix: a zero-variance column previously produced 0/0 -> NaN (with a
    RuntimeWarning). Such columns now normalize to all zeros, which is the
    conventional behavior for constant features.

    Args:
        x: 2-D numpy array, samples on axis 0.

    Returns:
        Array of the same shape with per-column standardization applied.
    """
    mean = x.mean(axis=0)
    std = x.std(axis=0)
    # Where std == 0, (x - mean) is also 0; dividing by 1 yields 0, not NaN.
    safe_std = np.where(std == 0, 1.0, std)
    normalized_x = (x - mean) / safe_std
    return normalized_x

def layer_normalize(x):
    """Standardize each sample (row) to zero mean and unit variance.

    Fixes:
    - A zero-variance row previously produced 0/0 -> NaN (RuntimeWarning);
      such rows now normalize to all zeros.
    - `keepdims=True` replaces reshape(-1, 1) + broadcast_to, which only
      worked for 2-D input; this form handles any rank identically for 2-D.

    Args:
        x: numpy array with features on the last axis.

    Returns:
        Array of the same shape with per-row standardization applied.
    """
    mean = x.mean(axis=-1, keepdims=True)
    std = x.std(axis=-1, keepdims=True)
    # Where std == 0, (x - mean) is also 0; dividing by 1 yields 0, not NaN.
    safe_std = np.where(std == 0, 1.0, std)
    normalized_x = (x - mean) / safe_std
    return normalized_x

def sigmoid(x):
    """Element-wise logistic function, numerically stable for large |x|.

    Fix: the naive 1/(1 + exp(-x)) overflows exp (RuntimeWarning, result
    driven through inf) for large negative x. Splitting on sign ensures
    exp is only ever evaluated on non-positive arguments.

    Args:
        x: scalar or numpy array.

    Returns:
        numpy array of the same shape with values in (0, 1).
    """
    x = np.asarray(x, dtype=float)
    result = np.empty_like(x)
    nonneg = x >= 0
    # x >= 0: exp(-x) <= 1, cannot overflow.
    result[nonneg] = 1.0 / (1.0 + np.exp(-x[nonneg]))
    # x < 0: rewrite as exp(x) / (1 + exp(x)); exp(x) <= 1, cannot overflow.
    exp_x = np.exp(x[~nonneg])
    result[~nonneg] = exp_x / (1.0 + exp_x)
    return result


def repeat_samples(samples, labels, n):
    """Stack n full copies of the sample matrix and its label vector.

    Args:
        samples: 2-D numpy array (rows are samples).
        labels: 1-D numpy array aligned with samples.
        n: number of copies to stack.

    Returns:
        (samples_out, labels_out) with n * len(samples) rows, copies laid
        end-to-end so row i of copy k still matches label i of copy k.
    """
    samples_out = np.tile(samples, (n, 1))
    labels_out = np.tile(labels, n)
    return samples_out, labels_out

def numpy_to_tensor(x, y):
    """Convert numpy feature/label arrays to torch tensors on the best device.

    Fix: the unconditional `.cuda()` crashed on CPU-only machines; fall back
    to CPU when CUDA is unavailable (behavior is identical on GPU hosts).

    Args:
        x: numpy feature array (dtype chosen by numpy_to_tensor_data).
        y: numpy label array, converted to float.

    Returns:
        (x_tensor, y_tensor) on CUDA if available, else CPU.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    x = numpy_to_tensor_data(x)
    y = torch.from_numpy(y).float().to(device)
    return x, y

def numpy_to_tensor_data(x):
    """Convert a numpy array to a torch tensor with the dtype the net expects.

    Embedding networks need integer (long) indices; all others get floats.

    Fixes:
    - `.to()` with no arguments is a no-op and has been removed.
    - The unconditional `.cuda()` crashed on CPU-only machines; fall back to
      CPU when CUDA is unavailable (behavior is identical on GPU hosts).

    Args:
        x: numpy array.

    Returns:
        torch tensor on CUDA if available, else CPU; dtype long for
        Embedding nets, float otherwise.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dtype = torch.long if "Embedding" in cfg.net else torch.float
    return torch.from_numpy(x).to(device=device, dtype=dtype)

if __name__ == "__main__":
    # Smoke-test normalization on a toy matrix (includes a near-constant row
    # and large-magnitude values).
    demo = np.array([[1000, 900, 800, 700], [1000, 5000, -600, 0], [1000, 800, 9, 0]])
    print(normalizeData(demo, 10000))
    # For an end-to-end example of building balanced train/test splits from
    # synthetic positive/negative samples, see consolidate_dataset().
