import jieba
import pandas as pd
from torchtext.legacy import data
def tokenizer(x):
    """Segment a Chinese string into a list of word tokens via jieba."""
    # jieba.cut returns a generator; materialize it directly instead of
    # copying it through an identity comprehension.
    return list(jieba.cut(x))

def getFeat():
    """Build torchtext iterators for sentiment classification.

    Reads ``Train.csv`` (columns ``text``/``labels``), keeps only rows whose
    label is one of positive/neutral/negative, writes the cleaned mapping to
    ``Train_label.csv``, then builds train/test TabularDatasets and Iterators.

    Returns:
        tuple: (train_iter, test_iter, embed_num, embed_dim) where
            embed_num is the vocabulary size built from the training text
            and embed_dim is the fixed embedding dimension (100).
    """
    TEXT = data.Field(sequential=True, tokenize=tokenizer, fix_length=100)
    LABEL = data.Field(sequential=False, use_vocab=False)

    train_df = pd.read_csv("Train.csv")
    label_map = {'positive': 0, 'neutral': 1, 'negative': 2}

    # Keep only rows with a recognised label, mapping the string label to
    # its integer id (vectorized replacement for the index loop +
    # label_map.__contains__ check).
    mask = train_df['labels'].isin(label_map)
    train_label_map = pd.DataFrame({
        'text': train_df.loc[mask, 'text'].tolist(),
        'labels': train_df.loc[mask, 'labels'].map(label_map).tolist(),
    })
    train_label_map.to_csv('Train_label.csv', index=False)

    train = data.TabularDataset(path='Train_label.csv',
                                format='csv',
                                skip_header=True,
                                fields=[('text', TEXT), ('labels', LABEL)])
    test = data.TabularDataset(path='Test.csv',
                               format='csv',
                               skip_header=True,
                               fields=[('text', TEXT)])
    TEXT.build_vocab(train)

    train_iter = data.Iterator(dataset=train, batch_size=256, shuffle=True,
                               sort_within_batch=False, repeat=False)
    # Bug fix: never shuffle (or sort) the test set — predictions must stay
    # aligned with the row order of Test.csv; train=False also disables the
    # training-mode batching heuristics.
    test_iter = data.Iterator(dataset=test, batch_size=256, shuffle=False,
                              train=False, sort=False,
                              sort_within_batch=False, repeat=False)

    embed_num = len(TEXT.vocab)
    embed_dim = 100

    print('finish feat_CNN')

    return train_iter, test_iter, embed_num, embed_dim