# -*-coding:utf-8-*-
import jieba
import os
import pandas as pd
import re
import numpy as np

def jieba_add(arr):
    """Register every term in *arr* as a known word in jieba's dictionary."""
    for term in arr:
        jieba.add_word(term)

def load_stop_words(corpus_path='corpus'):
    """Load stop words from ``<corpus_path>/stopwords.txt``, one per line.

    Returns the raw list produced by splitting on newlines (a trailing
    newline therefore yields a final empty string, as before).
    utf-8 is forced so the Chinese word list loads regardless of the
    platform's default encoding.
    """
    with open(os.path.join(corpus_path, 'stopwords.txt'), encoding='utf-8') as f:
        return f.read().split('\n')

def word_valid(ws, sep=None):
    """Split *ws* on *sep* and return the non-empty, stripped tokens.

    Fixes two defects in the old version: the default ``sep=''`` always
    raised ``ValueError`` (empty separator), and tokens consisting only of
    whitespace survived the truthiness filter and came back as empty
    strings after stripping.  ``sep=None`` splits on any whitespace run.
    """
    return [token for token in (w.strip() for w in ws.split(sep)) if token]


class SentenceCut:
    """Segment a sentence with jieba and drop stop words and junk tokens."""

    # Tokens that BEGIN with a digit, whitespace or ASCII punctuation
    # (plus the fullwidth apostrophe) are rejected.  Compiled once at
    # class level instead of re-parsing the pattern on every call.
    # Character set is byte-identical to the original pattern.
    _JUNK = re.compile(r'[\d\s’!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~]+')

    def __init__(self, stopwords):
        # A set gives O(1) membership tests; the original list was O(n)
        # per token.
        self.stopwords = set(stopwords)

    def cut(self, s):
        """Return the valid tokens of sentence *s* (see is_valid)."""
        return [tok for tok in jieba.cut(s) if self.is_valid(tok)]

    def is_valid(self, s):
        """True when *s* is neither a stop word nor a junk token."""
        return s not in self.stopwords and self._JUNK.match(s) is None



class TextFilter:
    """Reduce a long article to the sentences that matter.

    The article is split into sentences; sentences shorter than
    ``minSentence`` characters, or containing none of the indicator
    keywords, are dropped, and the survivors are concatenated until the
    total would exceed ``maxArticle`` characters.

    Must be constructed with a SentenceCut instance; keyword files are
    loaded from *corpus_path*.
    """

    def __init__(self, sentence_cut, corpus_path='corpus'):
        self.minSentence = 5     # shortest sentence kept
        self.minArticle = 20     # NOTE(review): declared but never enforced below
        self.maxArticle = 100    # rebuilt article length cap
        self.corpus_path = corpus_path
        self.sc = sentence_cut
        self.load_filter_words(self.corpus_path)

    def load_filter_words(self, corpus_path):
        """Load the predefined keyword lists (vegetables, cities, indicators).

        Keywords can be collected manually or extracted from the training
        set with tf-idf (this system uses tf-idf-extracted keywords).
        """
        # utf-8 is forced so the Chinese word lists load on any platform.
        with open(os.path.join(corpus_path, 'vegetable_list'), 'r', encoding='utf-8') as f:
            self.veg_words = set(word_valid(f.read(), ' '))

        with open(os.path.join(corpus_path, 'city_list'), 'r', encoding='utf-8') as f:
            self.city_words = set(word_valid(f.read(), ' '))

        indList = ['供需', '政策', '自然环境']
        self.ind_words = set()
        for ind in indList:
            # Honour corpus_path instead of the old hard-coded 'corpus/' prefix.
            with open(os.path.join(corpus_path, ind), 'r', encoding='utf-8') as f:
                self.ind_words |= set(word_valid(f.read(), '\n'))
        self.ind_words |= {'减', '增', '变化', '涨', '跌', '下调', '走高', '回落', '平稳'}
        self.ind_words |= self.veg_words
        # jieba_add(self.veg_words)
        jieba_add(self.city_words)
        jieba_add(self.ind_words)

    def filterDataFrame(self, df, field='Text'):
        """Rebuild the text in *field*; rows whose text becomes empty are dropped."""
        df[field] = df[field].apply(self.rebuild)
        # pd.np was removed in pandas 1.0 -- use numpy directly.
        df.replace(to_replace='', value=np.nan, inplace=True)
        df.dropna(subset=[field], inplace=True)
        df.reset_index(drop=True, inplace=True)
        return df

    def vegCityFilterDataFrame(self, df, cols=['Title', 'Text']):
        """Keep rows mentioning both a vegetable and a city in *cols*.

        The first match from each keyword set is stored in the 'Veg' and
        'City' columns; rows missing either are dropped.
        """
        def getItem(line, st):
            # First token, scanning the columns in order, that is in *st*.
            for col in cols:
                for w in jieba.cut(line[col]):
                    if w in st:
                        return w
            return np.nan

        CityList = [getItem(df.loc[i], self.city_words) for i in df.index]
        VegList = [getItem(df.loc[i], self.veg_words) for i in df.index]

        # Assign plain lists (positional) so a non-default index cannot
        # misalign the way pd.Series(list) did.
        df['City'] = CityList
        df['Veg'] = VegList
        df.dropna(subset=['City', 'Veg'], inplace=True)
        df.reset_index(drop=True, inplace=True)
        return df

    def rebuild(self, article):
        """Split *article* into sentences, keep the important ones and
        concatenate them up to ``maxArticle`` characters."""
        punc = '[ .?!。 ？！]'
        sentences = re.split(punc, article.strip())
        validSentence = [s.strip() for s in sentences if self.isSentenceValid(s)]

        artLength = 0
        retArt = ''
        for s in validSentence:
            artLength += len(s)
            # Stop before the sentence that would push past the cap.
            if artLength > self.maxArticle:
                break
            retArt += s
        return retArt

    def isSentenceValid(self, s):
        """A sentence is kept when it is long enough and mentions at least
        one indicator keyword."""
        if len(s) < self.minSentence:
            return False
        return any(w in s for w in self.ind_words)




class VegDB:
    """Vocabulary database: encodes segmented sentences as integer sequences.

    Must be constructed with a SentenceCut instance.  When *vocab_path* is
    given, the vocabulary is loaded from that space-separated file and the
    word index is built immediately.
    """

    def __init__(self, sentence_cut, vocab_path=''):
        self.sc = sentence_cut
        if vocab_path != '':
            with open(vocab_path, 'r', encoding='utf-8') as fin:
                self.vocab = fin.read().split(' ')
            self.generate_index()

    def generate_index(self):
        """Rebuild ``vocab_size`` and the word -> index map from ``self.vocab``."""
        self.vocab_size = len(self.vocab)
        self.word_index = {v: i for i, v in enumerate(self.vocab)}

    def create_vocab(self, df, data_field, vocab_out='model/vocab'):
        """Build the vocabulary from *df[data_field]* and persist it.

        ``<PAD>`` and ``<UNK>`` are always included (``<UNK>``'s index is
        the fallback used by encode_sentence).  *vocab_out* lets callers
        choose the output path; the default keeps the old location.
        """
        vocab = {'<PAD>', '<UNK>'}
        for s in df[data_field]:
            if s:
                vocab.update(self.sc.cut(s))
        self.vocab = list(vocab)
        self.generate_index()
        with open(vocab_out, 'w', encoding='utf-8') as fout:
            fout.write(' '.join(self.vocab))

    def _make_train_data(self, df, data_field, cate_field, label_index, shuffle, test_rate):
        """Shared implementation behind the two get_train_data_* methods.

        Returns ``(train_x, train_y), (test_x, test_y)``.
        """
        df = df[df[cate_field].isin(list(label_index))]
        # Encode text and label row by row so the two lists can never get
        # out of step (the old code dropped empty texts from datas only,
        # which made the length assertion fire).
        pairs = [(self.encode_sentence(self.sc.cut(s)), label_index[c])
                 for s, c in zip(df[data_field], df[cate_field]) if s]
        datas = [d for d, _ in pairs]
        labels = [l for _, l in pairs]
        if shuffle:
            # Same RNG state for both shuffles keeps (data, label) pairs aligned.
            state = np.random.get_state()
            np.random.shuffle(datas)
            np.random.set_state(state)
            np.random.shuffle(labels)
        boundary = int(len(labels) * (1 - test_rate))
        return (datas[:boundary], labels[:boundary]), (datas[boundary:], labels[boundary:])

    def get_train_data_from_dataframe(self, df, data_field, cate_field, shuffle=True, test_rate=0.1):
        """Two-class (正向/负向) train/test split of *df*."""
        return self._make_train_data(df, data_field, cate_field,
                                     {'正向': 0, '负向': 1}, shuffle, test_rate)

    def get_train_data_from_dataframe2(self, df, data_field, cate_field, shuffle=True, test_rate=0.1):
        """Three-class (供给/需求/自然环境) train/test split of *df*."""
        return self._make_train_data(df, data_field, cate_field,
                                     {'供给': 0, '需求': 1, '自然环境': 2}, shuffle, test_rate)

    def get_predict_data_from_dataframe(self, df, data_field):
        """Encode *df[data_field]* for prediction (empty texts are skipped)."""
        return [self.encode_sentence(self.sc.cut(s)) for s in list(df[data_field]) if s]

    def get_word_index(self):
        return self.word_index

    def encode_sentence(self, s):
        """Map each token to its index; unknown tokens map to 1 (<UNK>)."""
        return [self.word_index.get(tok, 1) for tok in s]

    def decode_sentence(self, s):
        """Inverse of encode_sentence for known indices."""
        return [self.vocab[i] for i in s]


# NOTE(review): dead code retained below -- a module-level string literal
# wrapping an old corpus-extraction helper, and a commented-out manual
# test driver.  Neither has any runtime effect; consider deleting both.
'''

def extract_from_corpus(from_path, data_field, cate_field, to_dir):
    df = pd.read_csv(from_path, sep='\t')
    cates = ['正向', '负向']
    for cate in cates:
        with open(os.path.join(to_dir, cate), 'w') as f:
            f.write('\n'.join([' '.join(cut(i)) for i in df[df[cate_field] == cate][data_field]]))
    return cates


'''

# if __name__ == "__main__":
#     # tft = Textfilter()
#     # df = pd.read_csv('corpus/corpus.csv')
#     # df['filter'] = df['文本'].apply(tf.rebuild)
#     # df.to_excel('test.xlsx')