import importlib
from ast import literal_eval

import pandas as pd
import torch
from torch.utils.data import Dataset

# try:
#     config = getattr(importlib.import_module('config'), f"{model_name}Config")
# except AttributeError:
#     print("dataset attr error")
#     print(f"{model_name} not included!")
#     exit()

# Resolve the model-specific config class by name from the project-local
# `config` module; the selected class drives which dataset attributes are
# loaded below (see the commented-out dynamic variant above).
config = getattr(importlib.import_module('config'), "MyModelConfig")


class BaseDataset(Dataset):
    """Dataset of parsed behavior records joined with per-news feature tensors.

    Reads ``behaviors_parsed.tsv`` (one impression per row) and
    ``news_parsed.tsv`` (one article per row, indexed by ``id``), converts the
    news features selected by ``config.dataset_attributes['news']`` into
    tensors, and left-pads each user's click history up to
    ``config.num_clicked_news_a_user`` entries.
    """

    def __init__(self, behaviors_path, news_path):
        """
        behaviors_path : path to behaviors_parsed.tsv
        news_path : path to news_parsed.tsv
        """
        super().__init__()
        allowed_news_attrs = [
            'category', 'subcategory', 'title', 'abstract', 'title_entities',
            'abstract_entities'
        ]
        for attribute in config.dataset_attributes['news']:
            assert attribute in allowed_news_attrs
        for attribute in config.dataset_attributes['record']:
            assert attribute in ['user', 'clicked_news_length']

        # behaviors_parsed.tsv columns:
        #   user           - integer-encoded user id
        #   clicked_news   - space-separated raw ids of the user's click history
        #   candidate_news - space-separated raw ids of the candidate news
        #   clicked        - space-separated 0/1 labels for the candidates
        self.behaviors_parsed = pd.read_table(behaviors_path)

        # Load only the news columns this model needs. List-valued columns are
        # stored as Python-literal strings; parse them back with literal_eval.
        wanted = config.dataset_attributes['news']
        list_valued = {'title', 'abstract', 'title_entities', 'abstract_entities'}
        self.news_parsed = pd.read_table(
            news_path,
            index_col='id',
            usecols=['id'] + wanted,
            converters={
                attribute: literal_eval
                for attribute in list_valued & set(wanted)
            })

        # Map each raw news id to a dense integer index.
        self.news_id2int = {
            news_id: i
            for i, news_id in enumerate(self.news_parsed.index)
        }

        # {news_id: {attribute: value}} via DataFrame.to_dict('index'); then
        # convert every feature value to a tensor in place.
        self.news2dict = self.news_parsed.to_dict('index')
        for features in self.news2dict.values():
            for attribute, value in features.items():
                features[attribute] = torch.tensor(value)

        # Tensor padding template for absent history slots, restricted to the
        # attributes this model actually uses.
        padding_all = {
            'category': 0,
            'subcategory': 0,
            'title': [0] * config.num_words_title,
            'abstract': [0] * config.num_words_abstract,
            'title_entities': [0] * config.num_words_title,
            'abstract_entities': [0] * config.num_words_abstract
        }
        self.padding = {
            attribute: torch.tensor(value)
            for attribute, value in padding_all.items()
            if attribute in wanted
        }

    def __len__(self):
        return len(self.behaviors_parsed)

    def __getitem__(self, idx):
        """Return one behavior record by row index.

        The click history is truncated to, then left-padded with
        ``self.padding`` up to, ``config.num_clicked_news_a_user`` entries;
        ``clicked_news_length`` (when requested) is the pre-padding length.
        """
        row = self.behaviors_parsed.iloc[idx]
        item = {}
        if 'user' in config.dataset_attributes['record']:
            item['user'] = row.user
        item["clicked"] = [int(label) for label in row.clicked.split()]
        # Replace raw news ids with their tensorized feature dicts.
        item["candidate_news"] = [
            self.news2dict[news_id] for news_id in row.candidate_news.split()
        ]
        history_ids = row.clicked_news.split()[:config.num_clicked_news_a_user]
        item["clicked_news"] = [
            self.news2dict[news_id] for news_id in history_ids
        ]
        if 'clicked_news_length' in config.dataset_attributes['record']:
            item['clicked_news_length'] = len(item["clicked_news"])
        pad_count = config.num_clicked_news_a_user - len(item["clicked_news"])
        assert pad_count >= 0
        # NOTE(review): the same padding dict object is aliased into every
        # padded slot — fine for read-only collation, do not mutate entries.
        item["clicked_news"] = [self.padding] * pad_count + item["clicked_news"]
        return item
