


### 低俗 第一批数据

# 1：敏感语料 文德提供的弹幕标注“低俗”语料 57条 精加工标注弹幕中“低俗” 1234条

# 2：非敏感语料：文德提供的弹幕标注“非低俗”语料14941，精加工标注弹幕中“非敏感” 3732条 ，随机挑选其中2000条

# 测试集比例0.1  329 2962 



### 低俗 第二批数据

# 1：敏感语料 文德提供的弹幕测试集“低俗”语料 57条，提供生产环境数据“低俗”36条， 精加工标注弹幕中“低俗” 1234条，第二次精加工标注“低俗” 2645条

# 2：非敏感语料：文德提供的弹幕标注“非低俗”语料10124，提供生产环境中错标低俗的630条，精加工标注弹幕中“非敏感” 3732条 ，第二次精加工数据标注“非敏感”2796，

# 测试集比例0.1 2125 19129



# ## 统计
# import os
# count = 0

# path = "/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/数据/文德语料_黄_挑选/sq"

# for root,dirs,files in os.walk(path):
#     for file in files:
#         if '.txt' in file:
#             file_name = os.path.join(root,file)
#             print(file_name)
#             with open(file_name,'r',encoding='utf-8') as f:
#                 for line in f.readlines():
#                     line = line.strip()
#                     if line:
#                         count += 1
# print(count)

# exit()

import pandas as pd
### 处理文德提供弹幕数据测试，提出非低俗部分

# def has_chinese(text):
#     if len(text) > 1:
#         for char in text:
#             if char >= '\u4e00' and char <= '\u9fa5':
#                 return True
#     return False

# df = pd.read_excel('/Users/leo/OneDrive/Work/项目工作/文德数慧-文本内容审核/分类实验/测试集结果.xlsx')

# print(len(df))
# df =  df[df['备注1'] != '低俗'].reset_index(drop=True)

# print(len(df))

# texts = []
# with open('/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/数据/文德语料_黄_挑选/非ds/文德提供弹幕数据测试集中非低俗.txt','w') as f:
#     for i in range(len(df)):
#         text = df.iloc[i]['弹幕审核测试文案']
#         if '跟大家击掌加油' in text:
#             continue
#         else:
#             text = text.strip()
#             if has_chinese(text):
#                 if text not in texts:
#                     f.write(text+'\n')
#                     texts.append(text)

# exit()



import pandas as pd
import os
import re
import random
random.seed(7)

def _collect_rows(path, label):
    """Recursively walk *path* and collect one labeled row per non-empty line
    of every ``.txt`` file found (read as UTF-8).

    Args:
        path:  directory to walk.
        label: integer class label attached to every line (0 = non-sensitive,
               1 = sensitive/低俗).

    Returns:
        list of {'label': int, 'text': str} dicts, in os.walk order so the
        downstream seeded shuffle stays reproducible.
    """
    rows = []
    for root, dirs, files in os.walk(path):
        for file in files:
            if '.txt' in file:
                file_name = os.path.join(root, file)
                print(file_name)
                with open(file_name, 'r', encoding='utf-8') as f:
                    # Iterate the file lazily instead of readlines() — same
                    # lines, no full-file materialization.
                    for line in f:
                        line = line.strip()
                        if line:
                            rows.append({'label': label, 'text': line})
    return rows


####  非敏感语料 (non-sensitive corpus, label 0) ####
rows = _collect_rows(
    "/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/数据/文德语料_黄_挑选/非ds", 0)

####  敏感语料 (sensitive corpus, label 1) ####
rows += _collect_rows(
    "/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/数据/文德语料_黄_挑选/ds", 1)

# Build the frame in one shot: DataFrame.append was deprecated in pandas 1.4
# and removed in 2.0, and per-row append is O(n^2) in the number of rows.
data = pd.DataFrame(rows, columns=['label', 'text'])
data['label'] = data['label'].astype(int)

# Deterministic full shuffle so the train/test split below is reproducible.
data = data.sample(frac=1, random_state=777).reset_index(drop=True)

# Full, unsplit training set ("v2_全训练" = train on everything).
data.to_csv(r"/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/数据/data_bert/ds/v2_全训练/train.tsv",sep='\t',header=False,index=False)


# 90/10 train/test split; first 10% of the shuffled frame is held out.
data_test = data[:int(len(data)*0.1)]
data_train = data[int(len(data)*0.1):]
print(len(data_test))
print(len(data_train))


# dev.tsv is intentionally a copy of test.tsv (BERT pipeline expects both).
data_train.to_csv(r"/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/数据/data_bert/ds/v2/train.tsv",sep='\t',header=False,index=False)
data_test.to_csv(r"/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/数据/data_bert/ds/v2/test.tsv",sep='\t',header=False,index=False)
data_test.to_csv(r"/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/数据/data_bert/ds/v2/dev.tsv",sep='\t',header=False,index=False)


