#coding:utf-8

import sys
import jieba
import jieba.analyse
import jieba.posseg as pseg
import train_data
import pandas  as pd
import numpy as np
import os
from train_data import genera_data
from config import config
# Resolve the project home directory once at import time; every path
# built below is relative to it.
home_dir =  config.get_home_dir()

# Python-2-only hack: site.py deletes sys.setdefaultencoding at startup,
# and reload(sys) restores it so implicit str<->unicode conversions use
# UTF-8 instead of ASCII (avoids UnicodeDecodeError on Chinese text).
reload(sys)
sys.setdefaultencoding( "utf-8" )

# Produce tagged training/test data for one raw text document.
def productTag(orgin_path):
    """POS-tag the document at ``orgin_path`` and convert the tagged
    output into training and test data files under the result tree.

    The intermediate tag file, the training output and the test output
    all live in fixed sub-directories of ``home_dir``.
    """
    doc_name = orgin_path.split("/")[-1]
    tag_path = home_dir + 'result/pos_tag_02/train_result_tag.txt'
    train_path = home_dir + 'result/product_03/' + doc_name + '_product.data'
    test_path = home_dir + 'result/test_03/' + doc_name + '_tag_test.data'
    print('===============================' + doc_name + '===============================================')
    genera_data.pos_tag_word(orgin_path, tag_path)
    genera_data.convertTag(tag_path, train_path, test_path, size=5, isTest=False)
    print('文件：' + doc_name)

# POS-tag every raw document found in result/text_01/.
def segment_doc():
    """Scan the raw-text directory and run productTag on each file."""
    source_dir = home_dir + 'result/text_01/'
    for source_path in eachFile(source_dir):
        productTag(source_path)
    print('文件标注完成！！')

# Walk one directory level and return the full path of every entry.
def eachFile(filepath):
    """Return a list of full paths for every entry directly under the
    directory *filepath*.

    Fixes over the original:
    - ``os.path.join('%s%s' % (filepath, allDir))`` was a single-argument
      join wrapping manual concatenation (the join did nothing, and the
      concatenation broke when *filepath* had no trailing separator);
      a real two-argument join inserts the separator when needed.
    - ``child.decode('gbk')`` discarded its result, so it had no effect
      and is removed.
    """
    file_names = [os.path.join(filepath, name) for name in os.listdir(filepath)]
    print('原始文档目录扫描完成')
    return file_names

# Batch-normalise the line endings of every file under *file_paths*.
def change_file_format(file_paths):
    """Recursively walk *file_paths* and rewrite each file so all line
    separators become '\\n': the file is read with universal-newline
    translation, then written back verbatim in binary mode.

    Fixes over the original: the handles were opened without ``with``
    and leaked if read/write raised; the '+' in mode "rU+" was never
    used (nothing was written through that handle); ``seek(0)``
    immediately after opening in "wb" and ``flush()`` right before
    ``close()`` were no-ops.

    NOTE(review): "rU" is the Python-2 universal-newline mode (removed
    in Python 3.11); on Python 3 plain "r" already translates newlines.
    """
    for root, dirs, files in os.walk(file_paths):
        for name in files:
            print("文件名; " + name)
            # Full path of the file being rewritten.
            catalog = os.path.join(root, name)
            # Universal-newline read turns '\r\n' / '\r' into '\n'.
            with open(catalog, "rU") as src:
                strings = src.read()
            # Binary write stores the translated text without any
            # platform newline re-translation on the way out.
            with open(catalog, "wb") as dst:
                dst.write(strings)
    print('文件转换完成！')
# Split the raw CSV export into one text/html file pair per record.
def perpare_data():
    """Read the juchao CSV dump and write, for each usable row, a text
    file under result/text_01/ and an html file under result/html_01/,
    while appending one metadata line per row to result/info.txt.

    Fix over the original: two files were opened on every iteration and
    never closed (and info.txt was only flushed, never closed), leaking
    thousands of handles; ``with`` blocks now close everything.
    """
    csv_path = home_dir + 'cr_data.juchao_text_01_1000.csv'
    df = pd.read_csv(csv_path, sep='\t', header=None)
    arr_list = df.values.tolist()
    i = 0
    file_info_path = home_dir + 'result/info.txt'
    with open(file_info_path, 'w') as file_info:
        for data in arr_list[1:5000]:
            # Counter increments even for skipped rows, matching the
            # original numbering of the generated files.
            i += 1
            text = data[7]
            html = data[8]
            # pandas reads empty cells as NaN; skip incomplete rows.
            if str(text) == "nan" or str(html) == "nan":
                continue
            file_path_text = home_dir + 'result/text_01/' + str(data[0]) + '_' + str(i) + '.txt'
            file_path_html = home_dir + 'result/html_01/' + str(data[0]) + '_' + str(i) + '.txt'
            with open(file_path_text, 'w') as file_text:
                file_text.write(text)
            with open(file_path_html, 'w') as file_html:
                file_html.write(html)
            file_info.write(str(data[0]) + '\t' + str(data[1]) + '\t' + str(data[2]) + '\t' + str(data[3]) + '\r\n')
            print('文档id: ' + str(data[0]) + ' ' + '当前数量：' + str(i))
    print('文档生成结束')

if __name__ == "__main__":
    # Step 1 (run once): split the raw CSV dump into per-document files.
    # perpare_data()
    # Step 2: POS-tag every prepared text document.
     segment_doc()
    # Step 3 (optional): batch-normalise line endings of training files.
    # file_paths = r"f:/abc_kownledgegraph/train_data/"
    # change_file_format(file_paths)