#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Time    :   2021/05/18 10:02:12
@Author  :   Leo Wood 
@Contact :   leowood@foxmail.com
'''

import os
import re
import pandas as pd
from tqdm import tqdm


def has_chinese(text):
    """Return True when *text* is longer than one character and contains
    at least one CJK unified ideograph (U+4E00..U+9FA5).

    Single-character strings always return False — presumably a deliberate
    filter against overly short comments.
    """
    if len(text) <= 1:
        return False
    return any('\u4e00' <= ch <= '\u9fa5' for ch in text)

def clean(text):
    """Remove emoticon-style markers of the form "/{xx" (slash, brace,
    two lowercase letters) from *text*, then trim surrounding whitespace.

    NOTE(review): the pattern strips only the leading "/{ab" part and
    leaves any closing "}" in place — confirm this is intended.
    """
    without_markers = re.sub(r"/{[a-z][a-z]", "", text)
    return without_markers.strip()

def stat(path):
    """Walk *path* recursively and print the total line count across all
    ``.txt`` files found.

    Each matched file name is printed as it is processed, and the grand
    total is printed at the end. Returns None.
    """
    count = 0
    for root, dirs, files in os.walk(path):
        for file in files:
            # endswith avoids false matches such as "a.txt.bak"
            if file.endswith('.txt'):
                file_name = os.path.join(root, file)
                print(file_name)
                # Explicit utf-8: the data files contain Chinese text.
                # Stream line-by-line instead of readlines() so the whole
                # file is never held in memory just to count lines.
                with open(file_name, 'r', encoding='utf-8') as f:
                    count += sum(1 for _ in f)
    print(count)

def txt_to_csv(date):
    """Parse the crawled danmu ``.txt`` files for *date* into a
    de-duplicated CSV of (nickname, raw text, cleaned text).

    Each input line is expected to look like "昵称：内容" (full-width
    colon separator). Lines whose cleaned text contains no Chinese, or
    whose cleaned text was already seen, are dropped.
    """
    data_dict = {'用户昵称': [], '弹幕内容': [], '弹幕内容_清洗': []}

    path = "/data/leo/Projects/bert/work/content_vetting/real-url-master/danmu/弹幕数据/{}".format(date)
    print(path)

    seen = set()  # O(1) duplicate check instead of scanning the list each time
    for root, dirs, files in os.walk(path):
        for file in files:
            # endswith avoids false matches such as "a.txt.bak"
            if file.endswith('.txt'):
                file_name = os.path.join(root, file)
                print(file_name)
                with open(file_name, 'r', encoding='utf-8') as f:
                    for line in f:
                        line = line.strip()
                        if not line:
                            continue
                        parts = line.split('：')
                        if len(parts) != 2:
                            continue
                        name, text = parts
                        text_clean = clean(text)
                        if has_chinese(text_clean) and text_clean not in seen:
                            seen.add(text_clean)
                            data_dict['用户昵称'].append(name)
                            data_dict['弹幕内容'].append(text)
                            data_dict['弹幕内容_清洗'].append(text_clean)

    df = pd.DataFrame(data_dict)
    out_dir = "/data/leo/Work/Wende/弹幕数据抓取/{}".format(date)
    # makedirs(exist_ok=True) also creates missing parents, unlike mkdir
    os.makedirs(out_dir, exist_ok=True)
    df.to_csv("{}/{}.csv".format(out_dir, date), index=False, encoding='utf_8_sig')


def danmu_ly_to_csv(file_name="/data/leo/Work/Wende/弹幕数据抓取/danmu_ly/评论汇总.txt",
                    out_path="/data/leo/Work/Wende/弹幕数据抓取/danmu_ly/danmu_liuyi.csv"):
    """Convert a single aggregated comment file into a de-duplicated CSV
    of (raw text, cleaned text).

    :param file_name: source text file, one comment per line
                      (defaults to the original hard-coded path).
    :param out_path:  destination CSV path
                      (defaults to the original hard-coded path).
    """
    data_dict = {'弹幕内容': [], '弹幕内容_清洗': []}

    seen = set()  # O(1) duplicate check instead of scanning the list each time
    with open(file_name, 'r', encoding='utf-8') as f:
        for line in tqdm(f.readlines()):
            text = line.strip()
            text_clean = clean(text)
            if has_chinese(text_clean) and text_clean not in seen:
                seen.add(text_clean)
                data_dict['弹幕内容'].append(text)
                data_dict['弹幕内容_清洗'].append(text_clean)

    df = pd.DataFrame(data_dict)
    # utf_8_sig BOM keeps Excel happy with the Chinese content
    df.to_csv(out_path, index=False, encoding='utf_8_sig')


def original_danmu_to_csv(path):
    """Parse every danmu ``.txt`` file under *path* into one de-duplicated
    CSV of (nickname, raw text, cleaned text), written to
    ``<path>/proceed_danmu_data.csv``.

    Each input line is expected to look like "昵称：内容" (full-width
    colon separator). Lines whose cleaned text contains no Chinese, or
    whose cleaned text was already seen, are dropped.
    """
    data_dict = {'用户昵称': [], '弹幕内容': [], '弹幕内容_清洗': []}

    seen = set()  # O(1) duplicate check instead of scanning the list each time
    for root, dirs, files in os.walk(path):
        for file in files:
            # endswith avoids false matches such as "a.txt.bak"
            if file.endswith('.txt'):
                file_name = os.path.join(root, file)
                print(file_name)
                with open(file_name, 'r', encoding='utf-8') as f:
                    for line in f:
                        line = line.strip()
                        if not line:
                            continue
                        parts = line.split('：')
                        if len(parts) != 2:
                            continue
                        name, text = parts
                        text_clean = clean(text)
                        if has_chinese(text_clean) and text_clean not in seen:
                            seen.add(text_clean)
                            data_dict['用户昵称'].append(name)
                            data_dict['弹幕内容'].append(text)
                            data_dict['弹幕内容_清洗'].append(text_clean)

    df = pd.DataFrame(data_dict)
    df.to_csv("{}/proceed_danmu_data.csv".format(path), index=False, encoding='utf_8_sig')


def merge_date_per_day(path):
    """Merge every per-day prediction CSV under *path*, keep rows the v6
    model predicted positive, sort by descending score, and write the
    first 5000 distinct cleaned texts to ``<path>/select_5000.txt``.

    Expects columns '弹幕内容_预测结果_v6', '弹幕内容_预测得分_v6' and
    '弹幕内容_清洗' in each CSV.
    """
    frames = []
    for root, dirs, files in os.walk(path):
        for file in files:
            # endswith avoids false matches such as "a.csv.bak"
            if file.endswith('.csv'):
                file_name = os.path.join(root, file)
                print(file_name)
                df = pd.read_csv(file_name)
                print(len(df))
                frames.append(df)
    # One concat over the collected frames is O(total rows); the old
    # pairwise concat in the loop re-copied everything each iteration.
    df_all = pd.concat(frames, ignore_index=True)

    # .copy() so the later in-place sort doesn't operate on a view
    df_all = df_all[df_all['弹幕内容_预测结果_v6'] == 1].copy()
    print(df_all)

    df_all['弹幕内容_预测得分_v6'] = df_all['弹幕内容_预测得分_v6'].astype('float')
    df_all.sort_values(by='弹幕内容_预测得分_v6', inplace=True, ascending=False)
    print(df_all)
    df_all = df_all.reset_index(drop=True)

    # De-duplicate while preserving score order; set membership is O(1)
    # versus the O(n) list scan per row of the original.
    seen = set()
    d_select = []
    for text in df_all['弹幕内容_清洗']:
        if text not in seen:
            seen.add(text)
            d_select.append(text)
            if len(d_select) == 5000:
                break

    # Explicit utf-8: texts are Chinese
    with open(path + '/select_5000.txt', 'w', encoding='utf-8') as f:
        for item in d_select:
            f.write(item + '\n')


    # df_all['弹幕内容_清洗'].to_excel(path + '/result_all_5000.xlsx',engine='xlsxwriter',index=False)


def merge_date_per_day_0524_after_5000(path, select_file='/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/弹幕数据抓取/合计/0517_0524/select_5000.txt'):
    """Merge every per-day prediction CSV under *path*, keep rows the v6
    model predicted positive, sort by descending score, and write all
    distinct cleaned texts NOT already present in *select_file* to
    ``<path>/select_after_5000.txt``.

    :param path: directory walked for ``.csv`` prediction files.
    :param select_file: previously selected texts, one per line
                        (defaults to the original hard-coded path).
    """
    frames = []
    for root, dirs, files in os.walk(path):
        for file in files:
            # endswith avoids false matches such as "a.csv.bak"
            if file.endswith('.csv'):
                file_name = os.path.join(root, file)
                print(file_name)
                df = pd.read_csv(file_name)
                print(len(df))
                frames.append(df)
    # One concat over the collected frames is O(total rows); the old
    # pairwise concat in the loop re-copied everything each iteration.
    df_all = pd.concat(frames, ignore_index=True)

    # .copy() so the later in-place sort doesn't operate on a view
    df_all = df_all[df_all['弹幕内容_预测结果_v6'] == 1].copy()
    print(df_all)

    df_all['弹幕内容_预测得分_v6'] = df_all['弹幕内容_预测得分_v6'].astype('float')
    df_all.sort_values(by='弹幕内容_预测得分_v6', inplace=True, ascending=False)
    print(df_all)
    df_all = df_all.reset_index(drop=True)

    # Set for O(1) exclusion checks against the earlier selection
    with open(select_file, 'r', encoding='utf-8') as f:
        select_5000 = {line.strip() for line in f}

    # De-duplicate while preserving score order; set membership is O(1)
    seen = set()
    d_select = []
    for text in df_all['弹幕内容_清洗']:
        if text not in seen and text not in select_5000:
            seen.add(text)
            d_select.append(text)

    # Explicit utf-8: texts are Chinese
    with open(path + '/select_after_5000.txt', 'w', encoding='utf-8') as f:
        for item in d_select:
            f.write(item + '\n')

def youtube_ly_to_csv(file_name, out_path="/data/leo/Work/Wende/弹幕数据抓取/youtube/youtube_fit_comments.csv"):
    """Convert a YouTube comment dump (one comment per line) into a
    de-duplicated CSV of (raw text, cleaned text).

    A second de-duplication pass drops rows whose cleaned text is equal
    after removing all spaces (kept in column '弹幕内容_清洗_').

    :param file_name: source text file.
    :param out_path: destination CSV path
                     (defaults to the original hard-coded path).
    """
    data_dict = {'弹幕内容': [], '弹幕内容_清洗': []}

    seen = set()  # O(1) duplicate check instead of scanning the list each time
    with open(file_name, 'r', encoding='utf-8') as f:
        for line in tqdm(f.readlines()):
            text = line.strip()
            text_clean = clean(text)
            if has_chinese(text_clean) and text_clean not in seen:
                seen.add(text_clean)
                data_dict['弹幕内容'].append(text)
                data_dict['弹幕内容_清洗'].append(text_clean)

    df = pd.DataFrame(data_dict)

    # Space-insensitive second pass: "中 文" and "中文" collapse together
    df['弹幕内容_清洗_'] = df['弹幕内容_清洗'].str.replace(' ', '')
    df.drop_duplicates('弹幕内容_清洗_', inplace=True)

    df.to_csv(out_path, index=False, encoding='utf_8_sig')

def danmu_to_csv(path_in, path_out):
    """Parse every danmu ``.txt`` file under *path_in* into one CSV of
    (raw text, cleaned text) at *path_out*, de-duplicated on the cleaned
    text with all spaces removed (kept in column '弹幕内容_清洗_').

    Each input line is expected to look like "昵称：内容" (full-width
    colon separator); the nickname part is discarded.
    """
    data_dict = {'弹幕内容': [], '弹幕内容_清洗': []}

    files = []
    for root, dirs, filenames in os.walk(path_in):
        for f in filenames:
            # endswith avoids false matches such as "a.txt.bak"
            if f.endswith('.txt'):
                files.append(os.path.join(root, f))

    for file_name in files:
        with open(file_name, 'r', encoding='utf-8') as f:
            for line in tqdm(f.readlines()):
                line = line.strip()
                if not line:
                    continue
                parts = line.split('：')
                if len(parts) != 2:
                    continue
                text = parts[1]
                text_clean = clean(text)
                if has_chinese(text_clean):
                    data_dict['弹幕内容'].append(text)
                    data_dict['弹幕内容_清洗'].append(text_clean)

    df = pd.DataFrame(data_dict)

    # Space-insensitive de-duplication: "中 文" and "中文" collapse together
    df['弹幕内容_清洗_'] = df['弹幕内容_清洗'].str.replace(' ', '')
    df.drop_duplicates('弹幕内容_清洗_', inplace=True)

    df.to_csv(path_out, index=False, encoding='utf_8_sig')



if __name__ == '__main__':
    # NOTE(review): only stat() actually runs — the exit() below makes
    # every statement after it unreachable, including the final
    # danmu_to_csv() call. The commented-out calls appear to be the
    # author's manual toggles for earlier processing runs.

    # date = '20210524'
    # txt_to_csv(date)

    # path = "/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/弹幕数据抓取/合计"
    # merge_date_per_day_0524_after_5000(path)

    path = "/data/leo/Work/Wende/弹幕数据抓取/0629_0727"
    stat(path)
    exit()


    # original_danmu_to_csv('/data/leo/Work/Wende/弹幕数据抓取/0525_0628')

    # danmu_ly_to_csv()

    # file_name = "/data/leo/Work/Wende/弹幕数据抓取/youtube/youtube_fit_comments.txt"
    # youtube_ly_to_csv(file_name)

    # Dead code: never reached because of exit() above.
    path_in = "/data/leo/Work/Wende/弹幕数据抓取/0629_0727"
    path_out = "/data/leo/Work/Wende/弹幕数据抓取/0629_0727/danmu_0629_0727.csv"
    danmu_to_csv(path_in,path_out)