# -*- coding: utf-8 -*-
# @Author  : suny570
# @Time    : 2022-04-04 22:12

import re
import os
import zipfile
import tarfile
import shutil
import pandas as pd

def read_file(filename):
    """Read a UTF-8 text file and return its lines (newlines kept).

    Returns None (after printing a diagnostic) when the file is missing
    or cannot be accessed, matching the original best-effort contract.
    """
    try:
        # `with` guarantees the handle is closed even on error.
        with open(filename, 'r', encoding='utf-8') as f:
            return f.readlines()
    except FileNotFoundError:
        print("File is not found.")
    except PermissionError:
        # Only a real permission failure gets the permission message;
        # the original printed it for *any* exception, which was misleading.
        print("You don't have permission to access this file.")

# 保存网页内容到本地文件
# Save a crawled page body to a local HTML file.
def save_response_file(file_path, file_title, response):
    """Write response.body (decoded with response.encoding) to
    ``file_path + <sanitized title>.html``.

    Returns False when file_title is None, True after a successful write.
    """
    if file_title is None:
        print("file_title is null\t:" + response.url)
        return False
    # Strip characters that are illegal in file names.
    title_re = re.sub(r'[/:*?"<>|]', '-', file_title)
    file_name = title_re + '.html'
    body = response.body.decode(response.encoding)
    print("save_file-----" + file_name)
    # Write with an explicit UTF-8 encoding; the original relied on the
    # platform default, which produced mojibake when reopening the file.
    with open(file_path + file_name, 'w', encoding='utf-8') as f:
        f.write(body)
    return True

# 从文件中读取内容
# Read tab-separated rows from a text file.
def get_file_info(file_name):
    """Parse a UTF-8 file of tab-separated lines into a list of rows.

    Each line becomes a list of its tab-separated fields; the trailing
    newline is stripped first. Empty rows are filtered by the length
    check (kept for parity with the original behaviour).
    """
    # `with` closes the handle; the original leaked it on exceptions.
    with open(file_name, 'r', encoding='utf-8') as f:
        return [
            line.replace('\n', '').split('\t')
            for line in f.readlines()
            if len(line) > 0
        ]

def get_csv_file_info(data_file):
    """Load a CSV with 'question' and 'answer' columns and return
    [[question, answer], ...] in file order.

    Uses column-wise zip instead of ``iterrows()`` — same output, but
    avoids building a Series per row (iterrows is the slowest way to
    extract two columns).
    """
    df_data = pd.read_csv(data_file)
    return [[q, a] for q, a in zip(df_data['question'], df_data['answer'])]

# 标准数据格式
def read_file_get_data_standards(file_path, index_name, document_type, fields_arr, file_flag):
    if file_flag == 'csv_file':
        file_info = get_csv_file_info(file_path)
    else:
        file_info = get_file_info(file_path)
    document_list = []
    if file_info is None:
        return None

    header = []
    header_map = {}
    if fields_arr is None:
        return document_list
    for col in fields_arr:
        header.append(col)

    header_map = dict(enumerate(header))
    idx = 1
    for row in file_info:

        document = {
            "_index": index_name,
            "_type": document_type,
            "_id": idx,
            "_source": {}
        }

        # 单独的ID 严格区分
        document["_source"][header_map[0]] = idx
        document["_source"][header_map[1]] = 'denghy35'
        document["_source"][header_map[2]] = ''
        document["_source"][header_map[3]] = '.txt'
        document["_source"][header_map[4]] = row[0]
        document["_source"][header_map[5]] = row[1]
        document["_source"][header_map[6]] = 1
        document["_source"][header_map[7]] = "2022-05-30 15:06:30"
        document["_source"][header_map[8]] = "2022-05-30 15:06:30"
        document["_source"][header_map[9]] = 0
        document["_source"][header_map[10]] = ''
        document["_source"][header_map[11]] = ''
        document["_source"][header_map[12]] = ''
        document["_source"][header_map[13]] = ''
        document["_source"][header_map[14]] = ''
        document["_source"][header_map[15]] = ''
        document["_source"][header_map[16]] = ''
        idx += 1
        document_list.append(document)
    return document_list

# 新闻数据格式
def read_file_get_data_news(file_path, index_name, document_type, fields_arr):

    file_info = get_file_info(file_path)
    document_list = []
    if file_info is None:
        return None

    header = []
    header_map = {}
    if fields_arr is None:
        return document_list
    for col in fields_arr:
        header.append(col)

    header_map = dict(enumerate(header))
    idx = 1
    for row in file_info:

        document = {
            "_index": index_name,
            "_type": document_type,
            "_id": idx,
            "_source": {}
        }
        # 单独的ID 严格区分
        document["_source"][header_map[0]] = idx               #id
        document["_source"][header_map[1]] = 'denghy35'        #userid
        document["_source"][header_map[2]] = row[0]            #content_ana_type
        document["_source"][header_map[3]] = row[1]            #title
        document["_source"][header_map[4]] = row[2]            #content
        document["_source"][header_map[5]] = '.txt'            #content_type
        document["_source"][header_map[6]] = 'http://www.xxxxx.com'   #content_link
        document["_source"][header_map[7]] = 1                        #status
        document["_source"][header_map[8]] = '2021-12-25 15:06:30'    #createtime
        document["_source"][header_map[9]] = '2021-12-25 15:06:30'  #updatetime
        document["_source"][header_map[10]] = ''
        document["_source"][header_map[11]] = ''
        document["_source"][header_map[12]] = ''
        document["_source"][header_map[13]] = ''
        document["_source"][header_map[14]] = ''
        document["_source"][header_map[15]] = ''
        document["_source"][header_map[16]] = ''
        document["_source"][header_map[17]] = ''
        idx += 1

        document_list.append(document)
    return document_list

# get all files abs name
def get_all_files_name(path):
    result = []
    for path,subdirs,files in os.walk(path):
        files.extend(subdirs)
        files.sort()
        for name in files:
            if name.find('.DS_Store') == -1:
                result.append(os.path.join(path,name))

    return result

def move_file(srcfile, dstfile):
    """Move srcfile to dstfile, creating the destination directory first.

    Prints a diagnostic and does nothing when srcfile is not a file.
    """
    if not os.path.isfile(srcfile):
        print("%s not exist!" % (srcfile))
        return
    dst_dir, _ = os.path.split(dstfile)
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    shutil.move(srcfile, dstfile)
    print("move %s -> %s" % (srcfile, dstfile))

def copy_file(srcfile, dstfile):
    """Copy srcfile to dstfile, creating the destination directory first.

    Prints a diagnostic and does nothing when srcfile is not a file.
    """
    if not os.path.isfile(srcfile):
        print("%s not exist!" % (srcfile))
        return
    dst_dir, _ = os.path.split(dstfile)
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    shutil.copyfile(srcfile, dstfile)
    print("copy %s -> %s" % (srcfile, dstfile))

#打包目录为zip文件（未压缩）
def make_zip(source_dir, output_filename):
    zipf = zipfile.ZipFile(output_filename, 'w')
    pre_len = len(os.path.dirname(source_dir))
    for parent, dirnames, filenames in os.walk(source_dir):
        for filename in filenames:
            pathfile = os.path.join(parent, filename)
            arcname = pathfile[pre_len:].strip(os.path.sep)   #相对路径
            zipf.write(pathfile, arcname)
    zipf.close()

# Pack the whole root directory in one shot. Empty subdirectories are
# included. To archive without compressing, change "w:gz" to "w:" or "w".
def make_targz(output_filename, source_dir):
    """Create a gzip-compressed tar of source_dir; the archive's root
    entry is the directory's base name."""
    archive_root = os.path.basename(source_dir)
    with tarfile.open(output_filename, "w:gz") as archive:
        archive.add(source_dir, arcname=archive_root)

def make_targz_one_by_one(output_filename, source_dir):
    """Create a gzip-compressed tar by adding files one at a time.

    Unlike make_targz, entries keep their full walked paths (no arcname
    rewrite) and empty subdirectories are not added. Uses a context
    manager so the archive is closed even on error (the original leaked
    the handle), and avoids shadowing the builtin ``dir``.
    """
    with tarfile.open(output_filename, "w:gz") as tar:
        for root, dirnames, files in os.walk(source_dir):
            for file in files:
                tar.add(os.path.join(root, file))

if __name__ == '__main__':
    # Ad-hoc smoke test: parse the local corpus CSV (result is discarded).
    # NOTE(review): hard-coded Windows path — only runs on the author's machine.
    csv_file = r'F:\AI_work\ES_based on search\corpus_class\S2_data.csv'
    get_csv_file_info(csv_file)
