from torchtext import data
from torchtext import vocab
import torch
import random
import os
import re

# Mapping from sentiment category name to integer class id, and the
# inverse lookup (class id -> category name) used when reporting results.
sentiment_cls = {'感动': 0, '同情': 1, '无聊': 2, '愤怒': 3, '搞笑': 4, '难过': 5, '新奇': 6, '温馨': 7}
r_sentiment_cls = {cls_id: name for name, cls_id in sentiment_cls.items()}

class NewsDataset(data.Dataset):
    """Dataset over the tab-separated Sina news files under data/sina.

    Each line is expected to contain three tab-separated fields:
    datetime (discarded, field is None), the raw sentiment label line,
    and the tokenized news content.

        -- path : utf-8 text file to read
        -- text_field : torchtext Field applied to the content column
        -- label_field : torchtext Field applied to the sentiment column
    """
    def __init__(self, path, text_field, label_field, **kwargs):
        fields = [('datetime', None), ('sentiment', label_field), ('content', text_field)]
        examples = []
        # Read every non-empty line. The previous implementation broke out
        # of the loop at the first blank line, silently truncating any file
        # that contains one mid-stream; skipping blanks reads the whole file.
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                news = line.strip('\n')
                if news:
                    examples.append(data.Example.fromlist(news.split('\t'), fields))
        super(NewsDataset, self).__init__(examples, fields, **kwargs)

# Iterator with weighted sampling: each minibatch's example indices are drawn
# from an external sampler (e.g. torch.utils.data.WeightedRandomSampler)
# instead of torchtext's own sequential/shuffled batching.
class WeightedIterator(data.Iterator):
    """torchtext Iterator whose batches are index-sampled by `sampler`.

    `sampler` is expected to yield one minibatch worth of dataset indices
    (i.e. batch_size of them) per full pass over it — see create_iterator,
    which builds it with num_samples=batch_size.
    """
    def __init__(self, sampler, *args, **kwargs):
        # *args/**kwargs are forwarded verbatim to data.Iterator
        # (dataset, batch_size, sort/sort_within_batch/repeat/shuffle, ...).
        super(WeightedIterator, self).__init__(*args, **kwargs)
        self.sampler = sampler

    def __iter__(self):
        while True:
            self.init_epoch()
            # One "epoch" = len(dataset) // batch_size sampled minibatches;
            # trailing examples that don't fill a batch are never yielded.
            for idx in range(len(self.dataset)//self.batch_size):
                selected_idx = list(self.sampler)  # draw batch_size indices
                minibatch = []
                for s_id in selected_idx:
                    minibatch.append(self.dataset[s_id])
                # fast-forward if loaded from state
                # NOTE(review): the batch above is sampled even when skipped
                # here, so resuming consumes the sampler identically to a
                # fresh run — but building `minibatch` is wasted work on
                # every skipped iteration.
                if self._iterations_this_epoch > idx:
                    continue
                self.iterations += 1
                self._iterations_this_epoch += 1
                if self.sort_within_batch:
                    # Mirror data.Iterator's within-batch ordering behavior.
                    if self.sort:
                        minibatch.reverse()
                    else:
                        minibatch.sort(key=self.sort_key, reverse=True)
                yield data.Batch(minibatch, self.dataset, self.device)
            if not self.repeat:
                return


def train_cls_stat(train_dataset, num_classes=None):
    """Compute per-class loss/sampling weights from training-set frequencies.

        -- train_dataset : iterable of examples exposing an integer
               `.sentiment` class id
        -- num_classes : number of classes; defaults to len(sentiment_cls)
               (backward compatible with the original hard-coded value)

    For class counts N_c the weight is 2 * min(log N) / log N_c, so the
    rarest... rather, the class with the smallest log-count gets weight 2
    and more frequent classes get proportionally smaller weights.
    NOTE(review): a class with count 0 yields log(0) = -inf and a class with
    count 1 yields log(1) = 0 (division by zero) — assumes every class has
    at least 2 training examples.
    """
    counts = torch.zeros(num_classes if num_classes is not None else len(sentiment_cls))
    for example in train_dataset:
        counts[example.sentiment] += 1
    # Original wrote -log(1/N), which is algebraically just log(N).
    log_counts = torch.log(counts)
    return 2 / (log_counts / torch.min(log_counts))


# Strips digit+date/time-unit runs (e.g. "3月", "10时"), bare numbers, and
# Latin letters from a token; compiled once instead of per call.
_CONTENT_CLEAN_RE = re.compile('[0-9]+[时分秒月年日早晚点]|[0-9]+|[a-zA-Z]')

# Lazily-loaded stopword set shared across calls. The original re-opened and
# re-parsed the stopword file for every single example, which dominated
# preprocessing time.
_STOPWORDS = None


def _load_stopwords():
    """Read and cache the Chinese stopword set from ./stopwords/cn_stopwords.txt."""
    global _STOPWORDS
    if _STOPWORDS is None:
        stopword_path = os.path.join(os.path.abspath('.'), 'stopwords', 'cn_stopwords.txt')
        with open(stopword_path, 'r', encoding='utf-8') as f:
            _STOPWORDS = {word.strip() for word in f}
    return _STOPWORDS


def preprocess_content(x):
    """Clean one tokenized example (list of token strings).

    Removes:
        - Chinese stopwords (from stopwords/cn_stopwords.txt)
        - pure numbers
        - number + date/time-unit tokens (年月日时分秒早晚点)
        - Latin letters ([本报、讯、记者]-style boilerplate per original note)
    Tokens that become empty after cleaning are dropped.

    NOTE: the stopword list is cached after the first call; edits to the
    file during a run are not picked up (the original re-read it each call).
    """
    stopwords = _load_stopwords()
    cleaned = (_CONTENT_CLEAN_RE.sub('', word) for word in x if word not in stopwords)
    return [word for word in cleaned if word]


def preprocess_label_classification(x):
    """Pick the majority-vote sentiment class id for classification.

    The first whitespace-separated token of `x` is skipped; each remaining
    token is parsed as "<2-char emotion>:<count>" (count starts at index 3).
    Returns the `sentiment_cls` id of the emotion with the highest count
    (ties go to the first occurrence, per `max` semantics).
    """
    votes = x.split()[1:]
    top = max(votes, key=lambda entry: int(entry[3:]))
    return sentiment_cls[top[:2]]

def preprocess_test_label(x):
    """Convert a raw label line into a normalized vote distribution.

    Skips the first whitespace-separated token, parses the integer count
    after each "<emotion>:" prefix (from index 3 of the token), and returns
    the counts divided by their total — a list of floats summing to 1.
    """
    entries = x.split()[1:]
    counts = [int(entry[3:]) for entry in entries]
    total = sum(counts)
    return [count / total for count in counts]

def get_sample_weight(train_dataset, cls_weight):
    """Map each training example to the weight of its class.

        -- train_dataset : sized iterable of examples with an integer
               `.sentiment` class id
        -- cls_weight : per-class weights, indexable by class id

    Returns a 1-D float tensor of length len(train_dataset) whose i-th
    entry is cls_weight[example_i.sentiment].
    """
    per_example = [float(cls_weight[example.sentiment]) for example in train_dataset]
    return torch.tensor(per_example)

def create_dataset(path, validation=True, fixed_length = 720, pretrained_vec=True):
    '''Create the train, validation and test datasets.

        -- path : directory holding the data; expects a `sina` subdirectory
               with sinanews.train / sinanews.test, and (when
               pretrained_vec=True) a `sgns.merge.word` vector file
        -- validation : [Default:True]
               if True return (train, val, test, TEXT)
               if False return (train_val, test, TEXT)
        -- fixed_length : every content sequence is padded/truncated to this
               length (fix_length of the TEXT field)
        -- pretrained_vec : load the sgns.merge.word vectors into TEXT's vocab

        Returns a tuple of datasets plus the built TEXT field (its vocab
        carries the token->id mapping and, optionally, embedding vectors).
    '''
    # Content: tokenized text, cleaned by preprocess_content, batch-first,
    # padded to fixed_length. Label: single majority-vote class id.
    TEXT = data.Field(sequential=True, use_vocab=True,preprocessing=preprocess_content, batch_first=True,
                        fix_length=fixed_length, pad_token='<pad>')
    LABEL = data.Field(sequential=False, use_vocab=False,preprocessing=preprocess_label_classification, batch_first=True)
    # TEST_LABEL = data.Field(sequential=False, use_vocab=False,preprocessing=preprocess_test_label, batch_first=True, dtype=torch.float)

    # train_val_dataset = NewsDataset(os.path.join(path, 'sina', 'sinanews.train'), text_field=TEXT, label_field=LABEL)
    # test_dataset = NewsDataset(os.path.join(path, 'sina', 'sinanews.test'), text_field=TEXT, label_field=TEST_LABEL)
    train_val_dataset, test_dataset = NewsDataset.splits(os.path.join(path, 'sina'), train='sinanews.train',test='sinanews.test',
                                                    text_field=TEXT, label_field=LABEL)
    # Stratified 90/10 split on the sentiment field keeps class ratios
    # comparable between train and validation.
    train_dataset, val_dataset = train_val_dataset.split(split_ratio=0.9, stratified=True, strata_field = 'sentiment',
                                                        random_state=random.getstate())
    print('train size = {}, val_size = {}, test_size = {}'.format(len(train_dataset), len(val_dataset), len(test_dataset)))

    # NOTE(review): LABEL was built with use_vocab=False, so these three
    # build_vocab calls (each overwriting the previous LABEL.vocab) look
    # unnecessary — confirm nothing reads LABEL.vocab before removing them.
    LABEL.build_vocab(train_dataset)
    LABEL.build_vocab(val_dataset)
    LABEL.build_vocab(test_dataset)
    # TEST_LABEL.build_vocab(test_dataset)
    # TEXT vocab is built over the full train+val split so validation tokens
    # are in-vocabulary.
    if pretrained_vec:
        TEXT.build_vocab(train_val_dataset, vectors=vocab.Vectors(name=os.path.join(path, 'sgns.merge.word')))
    else:
        TEXT.build_vocab(train_val_dataset)

    if validation:
        return (train_dataset, val_dataset, test_dataset, TEXT)
    else:
        return (train_val_dataset, test_dataset, TEXT)

def create_iterator(train_dataset, val_dataset, test_dataset, weighted_sample=False, return_weight=False, batch_size=16):
    '''Build iterators over the three datasets for convenient loading.

        -- train/val/test_dataset : the corresponding datasets
        -- weighted_sample : if True, draw training batches via a
               class-balancing WeightedRandomSampler wrapped in
               WeightedIterator; val/test stay standard iterators
        -- return_weight : if True, also return the per-class weights
               computed by train_cls_stat
        -- batch_size : minibatch size for all three iterators

        Returns (train_iter, val_iter, test_iter) or, with
        return_weight=True, (train_iter, val_iter, test_iter, cls_weight).
    '''
    cls_weight = train_cls_stat(train_dataset)
    if weighted_sample:
        train_example_weight = get_sample_weight(train_dataset, cls_weight)
        # num_samples == batch_size: one full pass over the sampler yields
        # exactly one minibatch worth of indices (see WeightedIterator).
        # Fixed: was the duplicated assignment `sampler = sampler = ...`.
        sampler = torch.utils.data.WeightedRandomSampler(train_example_weight, num_samples=batch_size,
                                                         replacement=False)
        train_iter = WeightedIterator(sampler, train_dataset, batch_size=batch_size,
                                        sort=False, sort_within_batch=False, repeat=False, shuffle=False)
        val_iter, test_iter = data.Iterator.splits([val_dataset, test_dataset], batch_sizes = [batch_size]*2,
                sort_key=lambda x:len(x.content), sort_within_batch=True,repeat=False, shuffle=True)
    else:
        train_iter, val_iter, test_iter = data.Iterator.splits([train_dataset, val_dataset, test_dataset],
                batch_sizes= [batch_size]*3, sort_key=lambda x:len(x.content), sort_within_batch=True,repeat=False, shuffle=True)
    if return_weight:
        return (train_iter, val_iter, test_iter, cls_weight)
    return (train_iter, val_iter, test_iter)