import numpy as np
import scipy.sparse as sp
import pymysql
import pandas as pd
from datetime import date

from data_utils import load_data, map_data, download_dataset


def normalize_features(feat):
    """Row-normalize a sparse feature matrix.

    Each row is divided by its row sum; all-zero rows are left as zeros
    instead of producing NaNs.

    Parameters
    ----------
    feat : scipy sparse matrix
        Feature matrix to normalize.

    Returns
    -------
    scipy sparse matrix
        Row-normalized copy of ``feat``.

    Raises
    ------
    ValueError
        If the normalized matrix has no non-zero entries at all.
    """
    degree = np.asarray(feat.sum(1)).flatten()

    # set zeros to inf to avoid dividing by zero (1/inf == 0, so all-zero
    # rows simply stay zero)
    degree[degree == 0.] = np.inf

    degree_inv = 1. / degree
    degree_inv_mat = sp.diags([degree_inv], [0])
    feat_norm = degree_inv_mat.dot(feat)

    if feat_norm.nnz == 0:
        # BUGFIX: the original evaluated the bare name `exit` (a no-op) after
        # printing, so this error path silently returned anyway. Fail loudly.
        raise ValueError('normalized adjacency matrix has only zero entries')

    return feat_norm


def preprocess_user_item_features(u_features, v_features):
    """
    Embed user and item features into one shared feature space.

    User rows occupy the first ``u_cols`` columns and item rows the last
    ``v_cols`` columns of a common ``(u_cols + v_cols)``-wide matrix, with
    zero padding on the opposite side.
    """
    num_u, u_cols = u_features.shape
    num_v, v_cols = v_features.shape

    u_pad = sp.csr_matrix((num_u, v_cols), dtype=u_features.dtype)
    v_pad = sp.csr_matrix((num_v, u_cols), dtype=v_features.dtype)

    padded_u = sp.hstack([u_features, u_pad], format='csr')
    padded_v = sp.hstack([v_pad, v_features], format='csr')

    return padded_u, padded_v


def globally_normalize_bipartite_adjacency(adjacencies, verbose=False, symmetric=True): # THIS PERFORMS NORMALIZATION WITH c_ij
    """Globally normalize a set of bipartite adjacency matrices.

    Parameters
    ----------
    adjacencies : list of scipy sparse matrices
        One adjacency matrix per rating class; all share the same shape.
    verbose : bool
        If True, print a progress message.
    symmetric : bool
        If True, apply D_u^{-1/2} A D_v^{-1/2}; otherwise left-normalize
        with D_u^{-1} only.

    Returns
    -------
    list of scipy sparse matrices
        The normalized adjacency matrices, in the same order.
    """
    if verbose:
        print('Symmetrically normalizing bipartite adj')

    # degree_u and degree_v are row and column sums over ALL rating classes.
    # BUGFIX: np.sum over a generator of sparse matrices is unsupported
    # (deprecated, and fails on modern NumPy); builtin sum works because
    # sparse matrices define __add__/__radd__ with scalar 0.
    adj_tot = sum(adjacencies)
    degree_u = np.asarray(adj_tot.sum(1)).flatten()
    degree_v = np.asarray(adj_tot.sum(0)).flatten()

    # set zeros to inf to avoid dividing by zero
    degree_u[degree_u == 0.] = np.inf
    degree_v[degree_v == 0.] = np.inf

    degree_u_inv_sqrt = 1. / np.sqrt(degree_u)
    degree_v_inv_sqrt = 1. / np.sqrt(degree_v)
    degree_u_inv_sqrt_mat = sp.diags([degree_u_inv_sqrt], [0])
    degree_v_inv_sqrt_mat = sp.diags([degree_v_inv_sqrt], [0])

    if symmetric:
        adj_norm = [degree_u_inv_sqrt_mat.dot(adj).dot(degree_v_inv_sqrt_mat) for adj in adjacencies]
    else:
        # left normalization only: D_u^{-1} A (D_u^{-1} built from the two
        # inverse-sqrt factors; only needed on this branch)
        degree_u_inv = degree_u_inv_sqrt_mat.dot(degree_u_inv_sqrt_mat)
        adj_norm = [degree_u_inv.dot(adj) for adj in adjacencies]

    return adj_norm


def sparse_to_tuple(sparse_mx):
    """Convert a scipy sparse matrix to a (coords, values, shape) tuple.

    This is the format used for the feed_dict, where sparse matrices are
    linked to placeholders representing sparse matrices.
    """
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    indices = np.vstack((coo.row, coo.col)).T
    return indices, coo.data, coo.shape


def load_official_trainvaltest_split(testing=False):
    """Load user/item features and the train/val/test rating split.

    Training ratings are read from the local MySQL ``movielens`` database
    (staff users and still-"newcomer" users/movies are excluded); the test
    ratings come from the official MovieLens 100K ``u1.test`` split.

    Parameters
    ----------
    testing : bool
        If True, fold the validation set into the training set (used for
        final test runs).

    Returns
    -------
    tuple
        (u_features, v_features, rating_mx_train, train_labels,
         u_train_idx, v_train_idx, val_labels, u_val_idx, v_val_idx,
         test_labels, u_test_idx, v_test_idx, class_values)
    """
    # NOTE(security): hard-coded DB credentials; move to config/env vars.
    conn = pymysql.connect(host='localhost', user='root', passwd='123456', db='movielens')
    sql_set_user = "UPDATE movierecommend_user SET is_newcomer=FALSE WHERE id IN (SELECT t.id FROM" \
                   "(SELECT DISTINCT(movierecommend_user.id) FROM movierecommend_user,movierecommend_rating WHERE " \
                   "movierecommend_user.id=movierecommend_rating.user_id_id AND is_newcomer=TRUE AND is_staff=FALSE AND " \
                   "rating in(1,2,3,4,5))t)"
    sql_set_movie = "UPDATE movierecommend_movie SET is_newcomer=FALSE WHERE id IN (SELECT t.id FROM(SELECT " \
                    "DISTINCT(movierecommend_movie.id) FROM movierecommend_movie,movierecommend_rating " \
                    "WHERE movierecommend_movie.id=movierecommend_rating.movie_id_id AND is_newcomer=TRUE AND " \
                    "rating in(1,2,3,4,5))t)"
    cursor = conn.cursor()
    # Users and movies that already have at least one real rating are no
    # longer "newcomers": clear the flag so they are included in training,
    # i.e. lift the user / item cold-start handling for them.
    print(cursor.execute(sql_set_user))
    print(cursor.execute(sql_set_movie))
    conn.commit()
    cursor.close()  # BUGFIX: cursor was never closed
    sql_query = "SELECT user_id_id, movie_id_id, rating FROM " \
                "movierecommend_user, movierecommend_movie, movierecommend_rating WHERE " \
                "movierecommend_user.id=movierecommend_rating.user_id_id AND " \
                "movierecommend_movie.id=movierecommend_rating.movie_id_id AND " \
                "movierecommend_user.is_staff=FALSE and movierecommend_user.is_newcomer=FALSE and " \
                "movierecommend_movie.is_newcomer=FALSE;"

    dtypes = {
        'u_nodes': np.int32, 'v_nodes': np.int32,
        'ratings': np.float32, 'timestamp': np.float64}

    filename_test = 'data/ml_100k/u1.test'  # official MovieLens test split
    data_train = pd.read_sql(sql_query, con=conn)
    # BUGFIX: the rename keys must match the SQL column names
    # (user_id_id / movie_id_id); the original keys made the rename a no-op
    # for those two columns.
    data_train.rename(columns={
        'user_id_id': 'u_nodes',
        'movie_id_id': 'v_nodes',
        'rating': 'ratings',
    }, inplace=True)
    sep = '\t'
    data_test = pd.read_csv(
        filename_test, sep=sep, header=None,
        names=['u_nodes', 'v_nodes', 'ratings', 'timestamp'], dtype=dtypes)
    data_test = data_test[['u_nodes', 'v_nodes', 'ratings']]
    data_array_train = data_train.values.tolist()
    data_array_train = np.array(data_array_train)
    data_array_test = data_test.values.tolist()
    data_array_test = np.array(data_array_test)

    data_array = np.concatenate([data_array_train, data_array_test], axis=0)

    u_nodes_ratings = data_array[:, 0].astype(dtypes['u_nodes'])
    v_nodes_ratings = data_array[:, 1].astype(dtypes['v_nodes'])
    ratings = data_array[:, 2].astype(dtypes['ratings'])

    # remap raw DB ids to contiguous indices [0, num_users) / [0, num_items)
    u_nodes_ratings, u_dict, num_users = map_data(u_nodes_ratings)
    v_nodes_ratings, v_dict, num_items = map_data(v_nodes_ratings)

    u_nodes_ratings, v_nodes_ratings = u_nodes_ratings.astype(np.int64), v_nodes_ratings.astype(np.int32)
    ratings = ratings.astype(np.float64)

    u_nodes = u_nodes_ratings
    v_nodes = v_nodes_ratings

    neutral_rating = -1  # placeholder label for "no rating observed"

    # assumes that ratings_train contains at least one example of every rating type
    rating_dict = {r: i for i, r in enumerate(np.sort(np.unique(ratings)).tolist())}

    labels = np.full((num_users, num_items), neutral_rating, dtype=np.int32)
    labels[u_nodes, v_nodes] = np.array([rating_dict[r] for r in ratings])

    for i in range(len(u_nodes)):
        assert(labels[u_nodes[i], v_nodes[i]] == rating_dict[ratings[i]])

    labels = labels.reshape([-1])

    # number of test and validation edges, see cf-nade code

    num_train = data_array_train.shape[0]
    num_test = data_array_test.shape[0]
    num_val = int(np.ceil(num_train * 0.2))
    num_train = num_train - num_val

    pairs_nonzero = np.array([[u, v] for u, v in zip(u_nodes, v_nodes)])
    idx_nonzero = np.array([u * num_items + v for u, v in pairs_nonzero])

    for i in range(len(ratings)):
        assert(labels[idx_nonzero[i]] == rating_dict[ratings[i]])

    idx_nonzero_train = idx_nonzero[0:num_train+num_val]
    idx_nonzero_test = idx_nonzero[num_train+num_val:]

    pairs_nonzero_train = pairs_nonzero[0:num_train+num_val]
    pairs_nonzero_test = pairs_nonzero[num_train+num_val:]

    # Internally shuffle training set (before splitting off validation set)
    rand_idx = list(range(len(idx_nonzero_train)))
    np.random.seed(42)
    np.random.shuffle(rand_idx)
    idx_nonzero_train = idx_nonzero_train[rand_idx]
    pairs_nonzero_train = pairs_nonzero_train[rand_idx]

    idx_nonzero = np.concatenate([idx_nonzero_train, idx_nonzero_test], axis=0)
    pairs_nonzero = np.concatenate([pairs_nonzero_train, pairs_nonzero_test], axis=0)

    val_idx = idx_nonzero[0:num_val]
    train_idx = idx_nonzero[num_val:num_train + num_val]
    test_idx = idx_nonzero[num_train + num_val:]

    assert(len(test_idx) == num_test)

    val_pairs_idx = pairs_nonzero[0:num_val]
    train_pairs_idx = pairs_nonzero[num_val:num_train + num_val]
    test_pairs_idx = pairs_nonzero[num_train + num_val:]

    u_test_idx, v_test_idx = test_pairs_idx.transpose()
    u_val_idx, v_val_idx = val_pairs_idx.transpose()
    u_train_idx, v_train_idx = train_pairs_idx.transpose()

    # create labels
    train_labels = labels[train_idx]
    val_labels = labels[val_idx]
    test_labels = labels[test_idx]

    if testing:
        u_train_idx = np.hstack([u_train_idx, u_val_idx])
        v_train_idx = np.hstack([v_train_idx, v_val_idx])
        train_labels = np.hstack([train_labels, val_labels])
        # for adjacency matrix construction
        train_idx = np.hstack([train_idx, val_idx])

    # make training adjacency matrix (stored value = rating class index + 1,
    # so that 0 means "no training rating")
    rating_mx_train = np.zeros(num_users * num_items, dtype=np.float32)
    rating_mx_train[train_idx] = labels[train_idx].astype(np.float32) + 1.
    rating_mx_train = sp.csr_matrix(rating_mx_train.reshape(num_users, num_items))

    class_values = np.sort(np.unique(ratings))

    # movie features (genres)

    sql_query = "SELECT id, genre FROM movierecommend_movie WHERE is_newcomer=FALSE"
    movie_df = pd.read_sql(sql_query, conn)

    # `genre` is stored as a fixed-width 0/1 string; its length gives the
    # number of genre features.
    num_genres = len(movie_df['genre'][0])

    v_features = np.zeros((num_items, num_genres), dtype=np.float32)
    for movie_id, g_vec in zip(movie_df['id'].values.tolist(), movie_df['genre'].values.tolist()):
        # check if movie_id was listed in ratings file and therefore in mapping dictionary
        g_vec = list(map(int, list(g_vec)))
        if movie_id in v_dict.keys():
            v_features[v_dict[movie_id], :] = g_vec

    # user features

    sql_query = "SELECT id, date_born, gender, occupation FROM movierecommend_user WHERE is_staff=FALSE and " \
                "is_newcomer=FALSE"
    users_df = pd.read_sql(sql_query, conn)
    conn.close()
    # convert birth date to (approximate) age in whole years
    users_df['date_born'] = users_df['date_born'].map(lambda date_: int(date.today().year - date_.year))
    users_df.rename(columns={'date_born': 'age'}, inplace=True)

    occupation = ['administrator', 'artist', 'doctor', 'educator', 'engineer', 'entertainment', 'executive',
                  'healthcare', 'homemaker', 'lawyer', 'librarian', 'marketing', 'programmer', 'retired',
                  'salesman', 'scientist', 'student', 'technician', 'writer', 'other', 'none']
    age = users_df['age'].values
    age_max = age.max()

    # occupations occupy feature columns 2..., after age (0) and gender (1)
    occupation_dict = {f: i for i, f in enumerate(occupation, start=2)}
    num_feats = 2 + len(occupation_dict)

    u_features = np.zeros((num_users, num_feats), dtype=np.float32)
    for _, row in users_df.iterrows():
        u_id = row['id']
        if u_id in u_dict.keys():
            # age, scaled by the maximum observed age.
            # BUGFIX: np.float was removed in NumPy 1.20; use builtin float.
            u_features[u_dict[u_id], 0] = row['age'] / float(age_max)
            # gender
            u_features[u_dict[u_id], 1] = float(row['gender'])
            # occupation (one-hot)
            u_features[u_dict[u_id], occupation_dict[row['occupation']]] = 1.

    u_features = sp.csr_matrix(u_features)
    v_features = sp.csr_matrix(v_features)

    print("User features shape: "+str(u_features.shape))
    print("Item features shape: "+str(v_features.shape))

    # Output format:
    # - u_features: sparse matrix whose columns are age, gender,
    #   occupation[0]..occupation[20] (age is the ratio to the max age;
    #   gender is 0: M / 1: F; the 21 occupations are one-hot encoded).
    # - v_features: sparse matrix whose columns are the 18 movie genres.
    # - rating_mx_train: sparse matrix in "(row, col) value" form.
    # - u_train_idx / v_train_idx: user / item indices of the training
    #   ratings; train_labels holds the rating class labels, with the real
    #   rating equal to label + 1, i.e.
    #   [u_train_idx, v_train_idx, train_labels] =
    #   [user_id - 1, item_id - 1, rating - 1].
    # - Validation and test outputs follow the same format.
    return u_features, v_features, rating_mx_train, train_labels, u_train_idx, v_train_idx, \
        val_labels, u_val_idx, v_val_idx, test_labels, u_test_idx, v_test_idx, class_values

