import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from itertools import chain
import jieba
import jieba.posseg as pseg
from wordcloud import WordCloud

def dm01_label_sns_countplot():
    """Draw class-label frequency bar charts for the train and dev splits.

    Reads ``./cn_data/train.tsv`` and ``./cn_data/dev.tsv`` (tab-separated,
    with a 'label' column) and shows one seaborn countplot per split.
    """
    plt.style.use('fivethirtyeight')

    train_df = pd.read_csv(filepath_or_buffer='./cn_data/train.tsv', sep='\t')
    dev_df = pd.read_csv(filepath_or_buffer='./cn_data/dev.tsv', sep='\t')

    # Same plot for each split; only the data frame and title differ.
    for title, frame in (('train', train_df), ('dev', dev_df)):
        sns.countplot(x='label', data=frame)
        plt.title(title)
        plt.show()

def dm02_len_sns_countplot_displot():
    """Visualize the sentence-length distribution of the training split.

    Adds a ``sentence_length`` column to the training frame, then shows a
    countplot (x tick labels suppressed — there are too many distinct
    lengths to label) followed by a displot (y tick labels suppressed).
    """
    plt.style.use('fivethirtyeight')
    train_df = pd.read_csv(filepath_or_buffer='./cn_data/train.tsv', sep='\t')
    # NOTE: the original also read dev.tsv here but never used it; the
    # redundant file read has been removed.

    # Vectorized string length instead of list(map(lambda x: len(x), ...)).
    train_df['sentence_length'] = train_df['sentence'].str.len()

    sns.countplot(x='sentence_length', data=train_df)
    plt.xticks([])
    plt.show()

    sns.displot(x='sentence_length', data=train_df)
    plt.yticks([])
    plt.show()

def dm03_sns_stripplot():
    """Strip-plot sentence length against class label for the training split.

    Shows how sentence lengths are distributed within each label, which
    helps spot length outliers per class.
    """
    plt.style.use('fivethirtyeight')
    train_df = pd.read_csv(filepath_or_buffer='./cn_data/train.tsv', sep='\t')
    # NOTE: the original also read dev.tsv here but never used it; the
    # redundant file read has been removed.

    # Vectorized string length instead of list(map(lambda x: len(x), ...)).
    train_df['sentence_length'] = train_df['sentence'].str.len()
    sns.stripplot(y='sentence_length', x='label', data=train_df)
    plt.show()

def dm04_cal_wordcount():
    """Print the vocabulary size (unique jieba tokens) of train and dev splits.

    Each sentence is segmented with ``jieba.lcut``; the union of all tokens
    per split gives that split's vocabulary.
    """
    train_df = pd.read_csv(filepath_or_buffer='./cn_data/train.tsv', sep='\t')
    dev_df = pd.read_csv(filepath_or_buffer='./cn_data/dev.tsv', sep='\t')

    # Pass jieba.lcut directly (no lambda wrapper) and flatten lazily with
    # chain.from_iterable instead of eagerly unpacking via chain(*obj).
    train_vocab = set(chain.from_iterable(map(jieba.lcut, train_df['sentence'])))
    dev_vocab = set(chain.from_iterable(map(jieba.lcut, dev_df['sentence'])))

    print('训练集单词总数', len(train_vocab))
    print('测试集单词总数', len(dev_vocab))


def get_a_list(text):
    """Return every adjective token (POS flag ``'a'``) found in *text*.

    Uses jieba's POS segmenter; each segment exposes ``word`` and ``flag``.
    """
    return [pair.word for pair in pseg.lcut(text) if pair.flag == 'a']

def get_word_cloud(keywords_list):
    """Render and display a word cloud from an iterable of keywords.

    Keywords are joined with spaces and fed to WordCloud (SimHei font so
    CJK glyphs render; at most 100 words, minimum word length 2).
    """
    cloud = WordCloud(
        font_path='./cn_data/SimHei.ttf',
        max_words=100,
        min_word_length=2,
        background_color='white',
    )
    cloud.generate(" ".join(keywords_list))

    plt.figure()
    plt.imshow(cloud, interpolation='bilinear')
    plt.axis('off')
    plt.show()

def dm05_word_cloud():
    """Show adjective word clouds for positive and negative training samples.

    Renders one cloud for label==1 sentences, prints a separator line, then
    renders one for label==0 sentences.
    """
    train_data = pd.read_csv(filepath_or_buffer='./cn_data/train.tsv', sep='\t')

    # Positive samples (label == 1).
    sentences = train_data[train_data['label'] == 1]['sentence']
    get_word_cloud(chain.from_iterable(map(get_a_list, sentences)))
    print('*' * 60)

    # Negative samples (label == 0).
    sentences = train_data[train_data['label'] == 0]['sentence']
    get_word_cloud(chain.from_iterable(map(get_a_list, sentences)))

def test_chain():
    """Demonstrate itertools.chain on separate iterables and an unpacked list.

    First chains two independent lists; then unpacks a list of tuples so
    chain flattens the tuples into a single sequence.
    """
    nums = [1, 2, 3, 4]
    letters = ['x', 'y', 'z']
    print(list(chain(nums, letters)))

    pairs = [(1, 'a'), (2, 'b'), (3, 'c')]
    print(list(chain(*pairs)))



if __name__ == '__main__':
    # Script entry point: currently only runs the itertools.chain demo;
    # the dm0x analysis functions must be invoked here manually if needed.
    test_chain()