# -*- coding: utf-8 -*-
import numpy as np
from collections import defaultdict

def special_list(nums, nums_p, c_p, N):
    """Draw N item ids with a popularity skew instead of a uniform draw.

    The first ``nums * nums_p`` ids (the "hot" group) are weighted so that
    they account for a ``c_p`` share of all draws — e.g. nums_p=0.2,
    c_p=0.8 reproduces the classic 20/80 rule.

    nums:   number of distinct items
    nums_p: fraction of items in the hot group
    c_p:    share of draws the hot group should receive
    N:      number of ids to sample
    """
    # Per-item weight multiplier of a hot item relative to an ordinary one.
    nps = (1 - nums_p) / nums_p * c_p / (1 - c_p)
    print("%.1f%%的物品出现概率是其他物品的%.2f倍"%(nums_p*100, nps))
    hot_count = int(nums * nums_p)  # size of the hot group
    weights = np.array([nps] * hot_count + [1] * (nums - hot_count))
    weights /= sum(weights)
    return np.random.choice(nums, N, p=weights)

# Construct N labelled records (default 10000)
def generate_label(user_nums = 200, item_nums = 400, label_nums = 1000, N = 10000, 
                   item_p = 0.2, i_p = 0.8,
                   label_p = 0.2, l_p = 0.4,
                   seed = 1234):
    """Generate N synthetic (user, item, label) records as an (N, 3) array.

    user_nums:  number of distinct users (ids drawn uniformly)
    item_nums:  number of distinct items (ids drawn uniformly)
    label_nums: number of distinct labels
    N:          number of records
    item_p:     declared best-seller fraction for items — NOTE(review):
                currently unused; items are sampled uniformly here, not
                through special_list as in generate_data
    i_p:        declared best-seller sales share — NOTE(review): also unused
    label_p:    fraction of "hot" labels (default 20%)
    l_p:        share of records carrying a hot label (default 40%)
    seed:       global numpy RNG seed for reproducibility
    """
    np.random.seed(seed)
    # Users and items are uniform columns; labels follow a popularity skew.
    users = np.random.randint(user_nums, size=(N, 1))
    items = np.random.randint(item_nums, size=(N, 1))
    labels = special_list(label_nums, label_p, l_p, N)
    return np.c_[users, items, labels]


# Generate random (user, item) data
def generate_data(user_nums = 200, item_nums = 400, N = 10000, item_p = 0.2, p = 0.8, seed = 1234):
    """Generate N synthetic (user, item) records as an (N, 2) array.

    user_nums: number of distinct users (ids drawn uniformly)
    item_nums: number of distinct items
    N:         number of records
    item_p:    best-seller fraction of items (default 20%)
    p:         sales share of the best sellers (default 80% — 20/80 rule)
    seed:      global numpy RNG seed for reproducibility
    """
    np.random.seed(seed)
    # Users are uniform; items follow the 20/80 popularity skew.
    users = np.random.randint(user_nums, size=(N, 1))
    items = special_list(item_nums, item_p, p, N)
    return np.c_[users, items]

# Convert a list of pairs into a dictionary of sets
def list2dict(data, user_item = True):
    """Group (user, item) pairs into a dict of sets.

    data:      iterable of (user, item) pairs
    user_item: if True, map user -> set of items; otherwise item -> set of users

    Returns a defaultdict(set).
    """
    dic = defaultdict(set)
    # Branch once on the loop-invariant direction flag instead of testing it
    # for every record.
    if user_item:
        for user, item in data:
            dic[user].add(item)
    else:
        for user, item in data:
            dic[item].add(user)
    return dic
    
# Split the dataset into train/test for the experiment design
def SpliceData(data, M, k, seed = 1234):
    """Split (user, item) pairs into train/test for M-fold style evaluation.

    A record goes to the test split when a draw from [0, M) equals k
    (probability 1/M); everything else goes to train.

    Returns (train, test), each a list of [user, item] lists.
    """
    np.random.seed(seed)
    train, test = [], []
    for user, item in data:
        # Pick the destination list with one RNG draw per record.
        bucket = test if np.random.randint(0, M) == k else train
        bucket.append([user, item])
    return train, test

# Load the movie ratings data
def loadfile(path):
    """Lazily yield each line of the text file at *path*.

    Lines keep their trailing newline, exactly as the file object yields
    them. (The original enumerated the lines but never used the index, so
    the loop is replaced with the idiomatic ``yield from``.)
    """
    with open(path, "r") as f:
        yield from f

def read_ratings(path,pivot = 0.8):
    """Read "user::movie::rating::timestamp" lines and split them randomly
    into train and test sets of [user, movie] string pairs.

    path:  ratings file (MovieLens-style ``::``-delimited lines — TODO confirm)
    pivot: probability a record lands in the train split (default 0.8)

    Returns (train_set, test_set).
    NOTE: draws from the global numpy RNG; seed beforehand for reproducibility.
    """
    train_set = []
    test_set = [] 
    
    for line in loadfile(path):
        # Skip blank lines (e.g. a trailing newline at EOF) instead of
        # crashing on the 4-way unpack below.
        if not line.strip():
            continue
        user,movie,rating,_ = line.split("::")
        if np.random.rand() < pivot:
            train_set.append([user, movie])
        else:
            test_set.append([user, movie])
    
    return train_set,test_set