import inspect
import json
import os.path
import pickle

from django_redis import get_redis_connection

from common.constant import response_code, pkl_key_dict, department_person_file, OneDay
from common.sql_constant_enum import bug_status_corresponding
from datetime import date, datetime

# Handle response status codes and return messages
from common.sql_instance_config import department_person_info_list_sql
from utils.cursorHandler import cursor


def add_reponse_code_msg(_data):
    """Attach the default success status fields (code=200, msg='success') to *_data* in place."""
    _data["code"] = 200
    _data["msg"] = "success"

# Serialize data to JSON format
def serialize_json_dumps(origin_data, code_type="success"):
    """Merge the response-code fields for *code_type* into *origin_data* (mutates it
    in place) and return the combined payload as a JSON string.

    ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable in the output.
    """
    origin_data.update(response_code[code_type])
    return json.dumps(origin_data, ensure_ascii=False)


def find_2_item_in_list(goal, compare_list):
    """Return 1 when *goal* occurs exactly twice as the 'name' field in
    *compare_list*, otherwise 0.

    :param goal: name value to look for
    :param compare_list: list of dicts, each carrying a 'name' key
    :return: 1 for exactly two occurrences, else 0 (kept as int for callers
        that rely on the original 1/0 contract)
    """
    occurrences = sum(1 for item in compare_list if item['name'] == goal)
    return 1 if occurrences == 2 else 0


def compute_rank_6_efficient(origin_list):
    # 获取所有的出现2次人员名单以及对应的数据
    person_set = {person['name'] for person in origin_list}
    person_num = 0
    person_efficient = -1
    person_score = {}
    for person in person_set:
        # print('------------------------------', person)
        if find_2_item_in_list(person, origin_list):
            # print('-------------', origin_list)
            for each_person in origin_list:
                if person == each_person['name']:
                    # 每个人的有效Bug数
                    person_efficient = int(each_person['count'])
                    if person not in person_score:
                        person_score[person] = [person_efficient]
                    else:
                        person_score[person].append( round(person_score[person][0] / int(each_person['count']) * 100, 2))
                        person_score[person].append( int(each_person['count']))
    return sorted(person_score.items(), key=lambda x: x[1][1], reverse=True)


# JSON serialization support for datetime/date values.
class ComplexEncoder(json.JSONEncoder):
    """JSONEncoder subclass that renders datetime as 'YYYY-MM-DD HH:MM:SS' and
    date as 'YYYY-MM-DD'; any other unserializable type falls through to the
    base class (which raises TypeError)."""

    def default(self, obj):
        # datetime must be checked first: datetime is a subclass of date.
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        if isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        return super().default(obj)

def serialize_json_date_type(date_type):
    """Serialize a date/datetime (or None) through ComplexEncoder and return it
    as a bare string: for non-None values the surrounding JSON double quotes
    are stripped; None comes back as the literal 'null'."""
    dumped = json.dumps(date_type, cls=ComplexEncoder)
    if date_type is None:
        return dumped
    # Drop the leading/trailing quotes around the JSON string value.
    return dumped[1:-1]


def down_chunk_file_manager(file_location, chuck_size=1024):
    """
    Stream a file in fixed-size chunks (for chunked download responses).

    :param file_location: path of the file to stream
    :param chuck_size: bytes per chunk
    :return: generator of bytes chunks; yields nothing when the file is missing
    """
    if not os.path.exists(file_location):
        print('-----------文件不存在-------------')
        return
    with open(file_location, "rb") as file:
        # Read until EOF; the b'' sentinel ends the iteration.
        for chuck_stream in iter(lambda: file.read(chuck_size), b''):
            yield chuck_stream


def write_download_file(file_localtion, columns, values):
    """
    Write a comma-separated file: one header line of *columns* followed by one
    line per row in *values*. Cells are converted with str() by print().

    :param file_localtion: destination file path (overwritten)
    :param columns: iterable of header names
    :param values: iterable of row iterables
    """
    # The per-row debug print to stdout was removed (leftover debugging).
    with open(file_localtion, "w", encoding='UTF-8') as file:
        print(*columns, sep=',', file=file)
        for each_value in values:
            print(*each_value, sep=',', file=file)


def transfer_dict_to_str(_dict):
    """
    Render a status-count mapping as 'label:value&label:value&...', where the
    label for each key comes from bug_status_corresponding.

    :param _dict: mapping of status key -> count
    :return: joined string ('' for an empty dict)
    """
    # join() replaces the original quadratic += loop and builtin-`str`
    # shadowing; the leftover debug print was removed.
    return '&'.join(
        f'{bug_status_corresponding[key]}:{value}' for key, value in _dict.items()
    )


def transfer_list_to_str(_list):
    """
    Concatenate items as ' ~ item' segments (note the leading ' ~ ').

    :param _list: iterable of items (stringified via f-string)
    :return: joined string ('' for an empty list)
    """
    # join() replaces the original quadratic += loop and builtin-`str`
    # shadowing; the leftover debug print was removed.
    return ''.join(f' ~ {each}' for each in _list)


def fetch_file_abs_path(_current_file=__file__, _goal_file=''):
    """Join *_goal_file* onto the parent of *_current_file*'s directory and
    return the normalized result (two dirname() hops up from the file itself)."""
    parent_dir = os.path.dirname(os.path.dirname(_current_file))
    return os.path.normpath(os.path.join(parent_dir, _goal_file))

def read_pkl_file(file_path, key_object=''):
    """
    Read the pickled dict stored at *file_path*.

    file_path: path to the .pkl file (by convention under the statistic dir)
    key_object: if non-empty, return only this key's value from the loaded
        dict (None when the key is absent)
    return: the unpickled object, the selected value, or None when the file
        is empty (i.e. freshly created and never written)

    NOTE: pickle.load must only be used on trusted local files — never on
    externally supplied data.
    """
    # Create an empty placeholder on first access so the read below succeeds.
    if not os.path.exists(file_path):
        with open(file_path, 'wb'):
            pass
    with open(file_path, 'rb') as f:
        try:
            tester_info_list = pickle.load(f)
        except EOFError:
            # A freshly created (or truncated) file holds no pickle stream yet.
            return None
    if key_object:
        return tester_info_list.get(key_object, None)
    return tester_info_list


def write_pkl_file(file_path, pkl_content, key_word=''):
    """
    Persist *pkl_content* to *file_path* via pickle, overwriting any
    existing content.

    file_path: path to the .pkl file (by convention under the statistic dir)
    pkl_content: object to serialize
    key_word: if non-empty, the content is wrapped as {key_word: pkl_content}
        so read_pkl_file can fetch it by that key
    """
    if key_word:
        pkl_content = {key_word: pkl_content}
    with open(file_path, 'wb') as f:
        pickle.dump(pkl_content, f)


def redis_data_get(key_name, search_sql, data_timeout=OneDay):
    """
    Read cached data from redis; on a miss (or an empty '[]' payload) run the
    SQL query, cache its JSON-serialized result, and return it.

    key_name: redis key to read/write
    search_sql: SQL executed on a cache miss
    data_timeout: cache expiry in seconds
    return: the query result
    """
    with get_redis_connection() as redis_client:
        # Fetch once instead of the original three redundant GET round-trips.
        # NOTE(review): the str comparison assumes the client returns decoded
        # strings (decode_responses=True) — confirm the redis configuration.
        cached = redis_client.get(key_name)
        if cached and cached != '[]':
            data_result = json.loads(cached)
        else:
            data_result = cursor.search_alone(search_sql)
            redis_client.set(key_name, json.dumps(data_result), ex=data_timeout)
    return data_result


def redis_hot_data_save(key_name, value_data, data_timeout=OneDay):
    """Store *value_data* in redis under *key_name* as JSON with the given
    expiry (seconds); always returns True."""
    payload = json.dumps(value_data)
    with get_redis_connection() as redis_client:
        redis_client.set(key_name, payload, ex=data_timeout)
    return True


def redis_hot_data_read(key_name):
    """Return the JSON-decoded value cached under *key_name*, or [] when the
    key is absent or holds an empty '[]' payload."""
    with get_redis_connection() as redis_client:
        # Fetch once instead of the original three redundant GET round-trips.
        # NOTE(review): the str comparison assumes the client returns decoded
        # strings (decode_responses=True) — confirm the redis configuration.
        cached = redis_client.get(key_name)
        if cached and cached != '[]':
            return json.loads(cached)
        return []

def get_self_function_name():
    """Return the name of the function that called this helper (one frame up)."""
    caller_frame = inspect.currentframe().f_back
    return caller_frame.f_code.co_name


def get_file_dirname_path(_file, par_dir=False, concat_file=''):
    """
    Given a file path (pass ``__file__``), resolve a directory relative to it.

    :param _file: absolute path of the current file
    :param par_dir: when True, climb one extra level (commonly the project root)
    :param concat_file: optional relative path joined onto the result
    :return: normalized path

    NOTE(review): despite the docstring of the original, the base result is the
    *parent* of the file's own directory (os.path.split(...)[0] is already the
    dirname, and dirname is applied again) — confirmed against the code below.
    """
    # Two dirname() hops: the file's directory, then its parent.
    base = os.path.dirname(os.path.dirname(_file))
    if par_dir:
        # One level higher still, made absolute as in the original.
        base = os.path.abspath(os.path.dirname(base))
    if concat_file:
        return os.path.normpath(os.path.join(base, concat_file))
    return os.path.normpath(base)


def time_parse_string(time_string, string_format="%Y-%m-%d %H:%M:%S", to_time_type="time_stamp"):
    """
    Parse a time string into a datetime or a POSIX timestamp.

    :param time_string: string to parse
    :param string_format: strptime format describing *time_string*
    :param to_time_type: "struct_time" -> return a datetime object;
        "time_stamp" -> return a float timestamp (local-time interpretation)
    :raises ValueError: for an unsupported *to_time_type* (the original
        silently returned None) or when *time_string* doesn't match the format
    """
    parsed = datetime.strptime(time_string, string_format)
    if to_time_type == "struct_time":
        return parsed
    if to_time_type == "time_stamp":
        return parsed.timestamp()
    raise ValueError(f"unsupported to_time_type: {to_time_type!r}")

