# coding:utf-8
'''
Numeric feature statistics and preprocessing utilities for the
text-classification dataset.
@author: wy
'''
import pandas as pd
from collections import defaultdict
import xlwt
import numpy as np
import random
def statistics_tags(inf, sheet_names, outf):
    """Tally how often each label occurs in the 'class' column of a CSV
    and write the counts to an Excel (.xls) worksheet.

    :param inf: path to the input CSV; must contain a 'class' column
    :param sheet_names: name of the worksheet to create
    :param outf: path of the .xls file to write
    """
    df = pd.read_csv(inf, encoding='utf-8')
    # value_counts tallies in one vectorized pass instead of a per-row
    # Python loop with a print per row.
    tags = df['class'].value_counts()
    wb = xlwt.Workbook()
    ws = wb.add_sheet(sheet_names)
    ws.write(0, 0, 'tags')
    ws.write(0, 1, 'count')
    for i, (tag, count) in enumerate(tags.items()):
        # cast numpy scalars to plain ints so xlwt can serialize them
        ws.write(i + 1, 0, int(tag) if not isinstance(tag, str) else tag)
        ws.write(i + 1, 1, int(count))
    wb.save(outf)

def statistics_length(inf, sheet_names, outf):
    """Build a histogram of document lengths (number of whitespace-separated
    tokens in 'word_seg') and write it to an Excel (.xls) worksheet.

    :param inf: path to the input CSV; must contain a 'word_seg' column
    :param sheet_names: name of the worksheet to create
    :param outf: path of the .xls file to write
    """
    df = pd.read_csv(inf, encoding='utf-8')
    # token count per document -> frequency of each length, vectorized
    # (replaces the per-row split/print loop).
    lengths = df['word_seg'].str.split().str.len().value_counts()
    wb = xlwt.Workbook()
    ws = wb.add_sheet(sheet_names)
    ws.write(0, 0, 'length')
    ws.write(0, 1, 'count')
    for i, (length, count) in enumerate(lengths.items()):
        ws.write(i + 1, 0, int(length))
        ws.write(i + 1, 1, int(count))
    wb.save(outf)

def split_train_val_set(inf, train_out_path, val_out_path, val_rate=0.1):
    """Randomly split a CSV into train and validation files.

    Works for any column schema (the original hard-coded the four
    competition columns), and slices the DataFrame directly instead of
    materializing every row through numpy.

    :param inf: input CSV path
    :param train_out_path: output path for the training split
    :param val_out_path: output path for the validation split
    :param val_rate: fraction of rows (from the shuffled tail) that go
        to the validation split; default 0.1
    """
    df = pd.read_csv(inf, encoding='utf-8')
    # Shuffle row positions with the module-level `random` so any caller
    # seeding behavior is unchanged.
    index = list(range(df.shape[0]))
    random.shuffle(index)
    shuffled = df.iloc[index].reset_index(drop=True)
    split = int(len(shuffled) * (1 - val_rate))
    shuffled.iloc[:split].to_csv(train_out_path, encoding='utf-8', index=False)
    shuffled.iloc[split:].to_csv(val_out_path, encoding='utf-8', index=False)

def statstic_words_nums(inf, outf):
    """Count how often every token appears in the 'word_seg' column and
    dump the resulting word/count table to a CSV file.

    :param inf: input CSV path; must contain a 'word_seg' column
    :param outf: output CSV path (row index is kept, as before)
    """
    df = pd.read_csv(inf, encoding='utf-8')
    counts = defaultdict(int)
    for text in df['word_seg']:
        for token in text.split():
            counts[token] += 1
    rows = [[token, freq] for token, freq in counts.items()]
    word_df = pd.DataFrame(rows, columns=['word', 'count'])
    word_df.to_csv(outf, encoding='utf-8')

def get_idf_value(word_or_char, inf, outf):
    """Compute the document frequency of every token (each token counted
    at most once per document) and write a CSV sorted by frequency
    descending, with two unnamed columns: token, count.

    :param word_or_char: 'word' -> use the 'word_seg' column,
        'char' -> use the 'article' column
    :param inf: input CSV path
    :param outf: output CSV path
    :raises ValueError: if word_or_char is neither 'word' nor 'char'
        (previously this fell through and raised NameError on `lines`)
    """
    df = pd.read_csv(inf, encoding='utf-8')
    if word_or_char == 'word':
        lines = list(df['word_seg'])
    elif word_or_char == 'char':
        lines = list(df['article'])
    else:
        raise ValueError("word_or_char must be 'word' or 'char'")
    doc_freq = defaultdict(int)
    for line in lines:
        # set() deduplicates within a document so each token contributes
        # at most 1 per document.
        for token in set(line.replace('\n', '').strip().split(' ')):
            doc_freq[token] += 1
    doc_freq = dict(sorted(doc_freq.items(), key=lambda x: x[1], reverse=True))
    result = pd.DataFrame([[token, doc_freq[token]] for token in doc_freq])
    result.to_csv(outf, encoding='utf-8', index=False)
def delete_stop_words(word_stop_inf, char_stop_inf, train_inf, outf):
    """Strip high-frequency stop tokens from the word and char columns
    of a dataset and write the filtered rows to a new CSV.

    Tokens whose count (second column of the stop files) is >= 20000 are
    treated as stop tokens.

    :param word_stop_inf: CSV of (word, count) rows, e.g. from get_idf_value
    :param char_stop_inf: CSV of (char, count) rows
    :param train_inf: CSV with 'word_seg' and 'article' columns
    :param outf: output CSV path with columns id, article, word_seg
        (id is the 0-based row position in the input)
    """
    df = pd.read_csv(word_stop_inf, encoding='utf-8')
    # Sets give O(1) membership tests; the original lists made the inner
    # filtering loops quadratic in the vocabulary size.
    stop_words = {str(df.iloc[i, 0]) for i in range(df.shape[0])
                  if int(df.iloc[i, 1]) >= 20000}
    df = pd.read_csv(char_stop_inf, encoding='utf-8')
    stop_chars = {str(df.iloc[i, 0]) for i in range(df.shape[0])
                  if int(df.iloc[i, 1]) >= 20000}
    df = pd.read_csv(train_inf, encoding='utf-8')
    word_lines = list(df['word_seg'])
    char_lines = list(df['article'])
    result = []
    for i, line in enumerate(word_lines):
        words = line.replace('\n', '').strip().split(' ')
        chars = char_lines[i].replace('\n', '').strip().split(' ')
        # join produces the same text as the original append-then-strip loop
        word_str = ' '.join(w for w in words if w not in stop_words)
        char_str = ' '.join(c for c in chars if c not in stop_chars)
        result.append([i, char_str, word_str])
    result_df = pd.DataFrame(result, columns=['id', 'article', 'word_seg'])
    result_df.to_csv(outf, index=False, encoding='utf-8')
if __name__ == '__main__':
    # Earlier pipeline steps, kept for reference:
    #statistics_tags('~/share/train_set.csv','Sheet1','../data/temp/tag_statis_result.xls')
    #statistics_length('~/share/train_set.csv','Sheet1','../data/temp/length_statis_result.xls')
    #split_train_val_set('../data/temp/train_new.csv','../data/temp/train.csv',
    #                   '../data/temp/val.csv')
    #statstic_words_nums('./train_set.csv','./word_statstic.csv')
    #get_idf_value('char','../data/temp/train_set.csv','../data/temp/idf_value_char.csv')
    #get_idf_value('word','../data/temp/train_set.csv','../data/temp/idf_value.csv')
    # Fixed: the call below was indented with 3 spaces, inconsistent with
    # the rest of the file's 4-space style.
    delete_stop_words('../data/temp/idf_value.csv', '../data/temp/idf_value_char.csv',
                      '../data/temp/test_set.csv', '../data/temp/test_new.csv')
