from mbkEGES.Generate_Graph import Graph
from mbkEGES.GetSample import Getsample, integration
from mbkEGES.model import Model
from mbkEGES.DataBatch import get_dataloader
from tqdm import tqdm
import torch
import torch.optim as optim

class EGes():
    """Trainer wrapper for EGES (Enhanced Graph Embedding with Side information).

    Builds an item graph from click logs, draws random-walk skip-gram samples
    (with negatives), and trains an embedding model over items plus their side
    information.
    """

    def __init__(self, click_df, user_colname, item_colname, time_colname, item_detail, sidenames,
                    walklength, walknumber,out_dim = 50, percent = 55, window = 1, negative_rate = 1,
                    one_or_two = 0):
        """
        click_df: user behavior data (DataFrame)
        item_detail: item detail data (DataFrame)
        sidenames: column names of the side information, e.g. ['item_id', 'category', 'word_counts']
        user_colname: str, column name of the user id in click_df
        item_colname: str, column name of the item id in click_df
        time_colname: str, column name of the click timestamp in click_df
        out_dim: output dimension of the item embeddings
        percent: adjacency interval between time nodes: 0 - 100%
        walklength: length of each random walk
        walknumber: number of random walks started from each node
        window: window size used by the skip-gram algorithm
        negative_rate: ratio of negative to positive samples
        one_or_two: default 0; selects a directed vs. bidirectional graph
        """
        self.out_dim = out_dim

        # Build graph -> sampler -> full (positive + negative) sample list once, up front.
        self.grapher = Graph(click_df, user_colname, item_colname, time_colname, item_detail, sidenames, percent)
        self.sampler = Getsample(self.grapher, walklength, walknumber, window, negative_rate, one_or_two)
        self.all_samples = self.sampler.positive_negative_sample()

    def train(self, batch_size = 200, lr = 0.001, scheduler_lr = 0.4, epochs = 1, print_number = 30, scheduler_number = 6):
        '''
        lr: initial learning rate
        scheduler_lr: decay factor (gamma) of the LR scheduler
        epochs: number of training epochs
        print_number: target number of loss printouts per epoch
        scheduler_number: target number of learning-rate updates per epoch
        '''
        self.batch_size = batch_size
        self.lr = lr
        self.scheduler_lr = scheduler_lr
        self.epochs = epochs
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        cpu_device = torch.device('cpu')
        trainmodel = Model(self.grapher, self.sampler, self.out_dim).to(device)
        optimizer = optim.Adam(trainmodel.parameters(), lr=self.lr)  # optimizer
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, self.scheduler_lr)
        batches_per_epoch = len(self.all_samples) // batch_size
        # Guard against ZeroDivisionError: when there are fewer batches per epoch
        # than print_number / scheduler_number, the integer division yields 0 and
        # the original `idx % interval` crashed on the first batch.
        print_time = max(1, batches_per_epoch // print_number)
        scheduler_time = max(1, batches_per_epoch // scheduler_number)
        for epoch in range(self.epochs):
            for idx, samples in enumerate(get_dataloader(self.batch_size, self.all_samples)):
                # NOTE(review): `labels` is passed to the loss without an explicit
                # .to(device); presumably `integration` / the model handle device
                # placement — confirm against their implementations.
                cen_items, con_items, side_infor_list, labels = integration(samples)
                pre = trainmodel(cen_items, con_items, side_infor_list)
                optimizer.zero_grad()
                loss = trainmodel.loss(pre, labels)
                loss.backward()
                optimizer.step()
                if idx % print_time == 0:
                    print(f'当前损失为{loss.item()}')
                # Skip idx == 0 so the LR is not decayed before any real progress
                # has been made at the start of each epoch.
                if idx > 0 and idx % scheduler_time == 0:
                    scheduler.step()
        # Move the trained model back to CPU so its embeddings are usable
        # without a GPU after training.
        self.model = trainmodel.to(cpu_device)
    # Usage notes — after the model has been trained, e.g.:
    #   eges = EGes(df_train, 'user_id', 'click_article_id', 'click_timestamp', df_article, sidenames,
    #           walklength=8, walknumber=5, out_dim=40, percent=58, window=1)
    #   eges.train(batch_size=100, lr=0.01, scheduler_lr=0.5, epochs=1, print_number=30, scheduler_number=9)
    # all embeddings are then stored on eges.model:
    #   item embeddings of the graph G:            eges.model.center_item_emb
    #   side-information embeddings:               eges.model.all_side_emb
    #   attention-weight embeddings:               eges.model.A
    #   item name -> index mapping of graph G:     eges.grapher.index_dict
    #   item -> side-information dict of graph G:  eges.grapher.item_side_dict
    #   side-information index mapping:            eges.grapher.side_information_dict