from multiprocessing import Process
from faker import Faker
import os
import csv
import random
from make_data.models.CustomProvider import CustomProvider
from make_data.utils.FileUtils import FileUtils
import datetime

# Configure the Faker locale; change to en_US etc. as needed (defaults to Chinese).
faker = Faker(locale='zh_CN')
# Register the project's custom data provider so `data_str` expressions can use it.
faker.add_provider(CustomProvider)


# Multi-process data generation. (Earlier versions used threads writing to one
# shared file, which serialized on the file lock; processes each write their own
# ordered temp file, which is merged at the end.)
def multi_process(process_num, file_type, data_file_path, file_name, data_num, data_str, split_str):
    """Fan out data generation across `process_num` worker processes.

    Each worker writes its share of `data_num` rows to an ordered temp file
    under `data_file_path/file_name/`; afterwards the parts are merged by
    FileUtils.text_file_merger.
    """
    FileUtils(data_file_path).fix_file_path()
    target_dir = data_file_path + os.sep + file_name
    FileUtils(target_dir).fix_file_path()

    # Resolve the file suffix, the matching writer function and any extra
    # writer argument (e.g. the SQL insert prefix).
    file_type, write_file_method, other_info = adapter_suffix_method(file_type)
    process_num = int(process_num)
    data_num = int(data_num)

    # Rows per worker; the last worker absorbs the division remainder.
    counts = [data_num // process_num] * process_num
    counts[-1] += data_num % process_num

    workers = []
    for idx, count in enumerate(counts):
        part_path = target_dir + os.sep + file_name + str(idx) + file_type
        if file_type == ".sql":
            # SQL writer takes one extra argument: the insert-statement prefix.
            worker_args = (part_path, count, data_str, split_str, other_info)
        else:
            worker_args = (part_path, count, data_str, split_str)
        workers.append(Process(target=write_file_method, args=worker_args))

    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # Merge the per-process part files into the final output.
    FileUtils(target_dir).text_file_merger(file_name)


# Generate a file of SQL INSERT statements.
def write_sql_file(file_dir, data_num, data_str, split_str, sql_pre_fix):
    """Write `data_num` SQL INSERT lines to `file_dir` (UTF-8).

    `data_str` is a Python expression string, typically chaining faker calls
    joined by +'<split_str>'+ and ending with a trailing +'<split_str>'+.
    It is re-evaluated once per row so faker produces fresh values.
    `sql_pre_fix` is e.g. "INSERT INTO table_name (col1, col2)".
    """
    # Both transforms are loop-invariant, so do them once up front (the
    # original re-ran them on every iteration as a no-op):
    # 1) append a marker and drop the trailing separator;
    # 2) tag the remaining separators so the evaluated row can later be
    #    split into quoted SQL column values.
    data_str = (data_str + "buildDataFlag").replace("+\'" + split_str + "\'+buildDataFlag", "")
    data_str = data_str.replace("+\'" + split_str + "\'+", "+\'" + split_str + "buildDataFlag\'+")
    # `with` guarantees the handle is closed (the original leaked it).
    with open(file_dir, 'w', encoding='utf-8') as fw:
        for _ in range(int(data_num)):
            # NOTE(security): eval() executes arbitrary code from data_str --
            # only feed it trusted, locally-authored expressions.
            row = eval(data_str)
            row = row.replace(split_str + "buildDataFlag", '","')
            fw.write(sql_pre_fix + " VALUES " + '("' + row + '");' + "\n")


# Generate a CSV data file.
def write_csv_file(file_dir, data_num, data_str, split_str):
    """Write `data_num` CSV rows to `file_dir` (GBK-encoded).

    `data_str` is a Python expression string (faker calls joined by
    +'<split_str>'+ with a trailing separator); it is re-evaluated once per
    row, and the row string is split on the separator into CSV columns.
    """
    # Loop-invariant transforms, done once: drop the trailing separator,
    # then tag the remaining separators for splitting after eval.
    data_str = (data_str + "buildDataFlag").replace("+\'" + split_str + "\'+buildDataFlag", "")
    data_str = data_str.replace("+\'" + split_str + "\'+", "+\'" + split_str + "buildDataFlag\'+")
    # GBK keeps Chinese text readable for zh_CN Excel; newline='' lets the
    # csv module control line endings. `with` closes the handle (the original
    # leaked it by rebinding `fw` to the csv writer). Debug print() removed.
    with open(file_dir, "w", newline='', encoding="gbk") as f:
        writer = csv.writer(f)
        for _ in range(int(data_num)):
            # NOTE(security): eval() of trusted, locally-authored expressions only.
            row = eval(data_str)
            writer.writerow(row.split(split_str + "buildDataFlag"))


# Generate a plain-text data file.
def write_txt_file(file_dir, data_num, data_str, split_str):
    """Write `data_num` text lines to `file_dir` (UTF-8).

    `data_str` is a Python expression string (faker calls joined by
    +'<split_str>'+ with a trailing separator); the separator stays in the
    output line, so only the trailing one is stripped.
    """
    # Loop-invariant: append a marker and drop the trailing separator once
    # (the original re-ran this check on every iteration).
    data_str = (data_str + "buildDataFlag").replace("+\'" + split_str + "\'+buildDataFlag", "")
    # `with` guarantees the handle is closed (the original leaked it).
    with open(file_dir, 'w', encoding='utf-8') as fw:
        for _ in range(int(data_num)):
            # NOTE(security): eval() of trusted, locally-authored expressions only.
            fw.write(eval(data_str) + "\n")


# Map the user's file-type choice to a suffix, writer function and extra info.
def adapter_suffix_method(file_type):
    """Resolve `file_type` ("1"=txt, "2"=csv, anything else=sql) to a tuple of
    (file suffix, writer function, extra writer argument).

    For SQL the extra argument is the INSERT prefix read interactively from
    the user; for txt/csv it is an unused placeholder string.
    """
    if file_type == "1":
        return ".txt", write_txt_file, "txt预留参数"
    if file_type == "2":
        return ".csv", write_csv_file, "csv预留参数"
    # Any other choice means SQL: prompt for the statement prefix.
    sql_prefix = input("请输入sql insert语句前缀，例：【INSERT INTO table_name (列1, 列2,...) 】：")
    return ".sql", write_sql_file, sql_prefix

# TODO Add a generation method that supports values dynamically derived from other
# fields: while parsing, strip out the dependency and the dynamically generated
# item, generate the rest, then splice the derived value back in before returning.
# final = eval(data_str,split_str)
