import argparse
import gzip
import json
import os
import pickle
import random
import subprocess
import sys
import time
from datetime import date, datetime
from glob import glob
from urllib import parse, request
import re
import itertools

import numpy as np
import torch
from tqdm.std import tqdm

import yaml

"""
超级常用的函数！
"""

# I/O
def is_in_notebook():
    """Return True when running inside a jupyter/ipython kernel."""
    return "ipykernel" in sys.modules.keys()


def clear_output():
    """
    Clear output for both jupyter notebook and the console.
    """
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
    if not is_in_notebook():
        return
    from IPython.display import clear_output as clear

    clear()


def read_json(path="test.json"):
    """Load and return a json object from a UTF-8 encoded file."""
    with open(path, "r", encoding="utf-8") as fin:
        return json.load(fin)


def read_json_from_path(path_patten):
    """Load every json file matching the glob pattern (sorted), with a progress bar."""
    sorted_paths = sorted(glob(path_patten))
    bar = tqdm(sorted_paths, ncols=50, desc=f"Loading from {path_patten}")
    return [read_json(p) for p in bar]


def yield_json_from_path(path_patten):
    """Return a lazy generator over json objects from files matching the pattern (sorted)."""
    sorted_paths = sorted(glob(path_patten))
    return (read_json(p) for p in sorted_paths)


def read_yaml(path, to_args=False, **kwargs):
    """
    Load a yaml config, optionally merged with extra settings and converted
    to an ``argparse.Namespace``, then attach a torch device.

    kwargs are extra settings merged into the config; on a name clash a
    warning is printed and the kwargs value wins.

    Example:
        import argparse
        parser = argparse.ArgumentParser()
        parser.add_argument("--config", "-c", default="demo.yml", type=str, help="")
        args = parser.parse_args()
        args = read_yaml(args.config, to_args=True, **vars(args))
        print(args)
    """
    with open(path) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)

    # merge extra settings; kwargs take precedence over the yaml values
    for k, v in kwargs.items():
        if k in data:
            print(
                f"overwrite warning! key `{k}` in yml file `{path}` has been replaced by `{v}` in args."
            )
        data[k] = v

    # yaml parses scientific notation like "1e-4" as a string; coerce to float.
    # Only strings are inspected (the old `"e" in data[k]` raised TypeError on
    # non-iterable values and hid it behind a bare except).
    for k in data.keys():
        v = data[k]
        if isinstance(v, str) and "e" in v:
            try:
                data[k] = float(v)
            except ValueError:
                pass  # not a number, keep the original string

    if to_args:
        data = argparse.Namespace(**data)

    # torch device setup; requires a `cuda_index` entry in the config.
    # Works for both dict and Namespace (the old code used attribute access
    # unconditionally and crashed with to_args=False).
    cuda_index = data["cuda_index"] if isinstance(data, dict) else data.cuda_index
    device = (
        torch.device(f"cuda:{cuda_index}") if cuda_index > -1 else torch.device("cpu")
    )
    if isinstance(data, dict):
        data["device"] = device
    else:
        data.device = device
    if cuda_index > -1:
        torch.cuda.set_device(cuda_index)

    return data


class ComplexEncoder(json.JSONEncoder):
    """
    json encoder that serializes datetime/date objects as formatted strings.
    """

    def default(self, obj):
        # datetime must be tested before date: datetime is a subclass of date
        if isinstance(obj, datetime):
            return obj.strftime("%Y-%m-%d %H:%M:%S")
        if isinstance(obj, date):
            return obj.strftime("%Y-%m-%d")
        return json.JSONEncoder.default(self, obj)


def save_to_json(obj, path, _print=True):
    """
    Dump `obj` as pretty-printed UTF-8 json, creating parent dirs as needed.

    Sets are converted to lists first (sets are not json-serializable);
    datetime/date values are handled by ComplexEncoder.
    When `_print` is true the saved file is listed via `ls -lh`.
    """
    if _print:
        print(f"SAVING: {path}")
    # isinstance instead of `type(obj) == set`: also covers set subclasses
    if isinstance(obj, set):
        obj = list(obj)
    dirname = os.path.dirname(path)
    if dirname and dirname != ".":
        os.makedirs(dirname, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f1:
        json.dump(obj, f1, ensure_ascii=False, indent=4, cls=ComplexEncoder)
    if _print:
        # argument-list form avoids shell interpretation of special chars in path
        res = subprocess.check_output(["ls", "-lh", path]).decode(encoding="utf-8")
        print(res)


def read_pkl(path="test.pkl"):
    """Load and return a pickled object from `path`."""
    with open(path, "rb") as fin:
        return pickle.load(fin)


def save_to_pkl(obj, path, _print=True):
    """
    Pickle `obj` to `path`, creating parent directories as needed; when
    `_print` is true the saved file is listed via `ls -lh`.
    """
    parent = os.path.dirname(path)
    if parent and parent != ".":
        os.makedirs(parent, exist_ok=True)
    with open(path, "wb") as fout:
        pickle.dump(obj, fout)
    if _print:
        listing = subprocess.check_output(f"ls -lh {path}", shell=True)
        print(listing.decode(encoding="utf-8"))


def read_jsonl(path="test.jsonl", desc="", max_instances=None):
    """
    Read a jsonl file into a list, optionally capped at `max_instances` lines.

    A non-empty `desc` wraps the iteration in a tqdm bar with that label.
    """
    records = []
    with open(path, "r", encoding="utf-8") as fin:
        iterator = enumerate(fin)
        if desc:
            iterator = tqdm(iterator, desc=desc, ncols=100)
        for line_no, raw in iterator:
            if max_instances and line_no >= max_instances:
                break
            records.append(json.loads(raw.strip()))
    return records


def jsonl_generator(path, topn=None, total=None, postfix_func=None, **kwargs):
    """
    Yield json objects from a jsonl file behind a tqdm progress bar.

    topn caps the number of yielded lines; total (or a `wc -l` count via
    file_line_count) sizes the bar; postfix_func, when given, returns a dict
    shown as the bar postfix after every yield. Yields nothing when the file
    does not exist.

    usage:
    succ = 0
    def _update():
        return {"success": succ}
    for item in jsonl_generator(path, total=123, postfix_func=_update):
    """
    if not os.path.exists(path):
        return None
    total = total or file_line_count(path)
    if topn:
        total = min(topn, total)
    with open(path) as fin:
        pbar = tqdm(fin, total=total, ncols=100, **kwargs)
        for idx, raw in enumerate(pbar):
            if topn and idx >= topn:
                break
            yield json.loads(raw.strip())
            if postfix_func:
                pbar.set_postfix(ordered_dict=postfix_func())


def save_to_jsonl(obj, path, _print=True):
    """
    Write an iterable of json-serializable records as one-object-per-line jsonl.

    Object of type set is not JSON serializable, so sets are converted to
    lists and dicts to (key, value) pairs first. For consistency with
    save_to_json, lines are now written with ensure_ascii=False (keeps CJK
    text readable) and datetime/date values go through ComplexEncoder.
    When `_print` is true the saved file is listed via `ls -lh`.
    """
    if isinstance(obj, set):
        obj = list(obj)
    elif isinstance(obj, dict):
        obj = obj.items()
    dirname = os.path.dirname(path)
    if dirname and dirname != ".":
        os.makedirs(dirname, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f1:
        for line in tqdm(obj, ncols=100, desc="saving jsonl"):
            f1.write(json.dumps(line, ensure_ascii=False, cls=ComplexEncoder) + "\n")
    if _print:
        # argument-list form avoids shell interpretation of special chars in path
        res = subprocess.check_output(["ls", "-lh", path]).decode(encoding="utf-8")
        print(res)


def save_to_gzip(data, path):
    """
    Gzip-compress a text string (UTF-8 encoded) to `path`.

    Fix: directory creation is skipped for a bare filename — previously
    os.makedirs("") raised FileNotFoundError when `path` had no directory part.
    """
    dirname = os.path.dirname(path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with gzip.open(path, "wb") as f:
        f.write(bytes(data, "utf8"))
    print(f"SAVE: {path}")


def smart_read_line(path):
    """
    Yield decoded text lines from a plain, gzip, or bz2 file.

    tar.gz archives are rejected: untar them first.
    """
    if path.endswith(".tar.gz"):
        raise ValueError(f"{path} should be uncompressed.")
    if path.endswith(".gz"):
        import gzip

        handle = gzip.open(path, "rb")
        needs_decode = True
    elif path.endswith(".bz2"):
        import bz2

        handle = bz2.open(path)
        needs_decode = True
    else:
        handle = open(path, encoding="utf-8")
        needs_decode = False
    with handle as f:
        for raw in f:
            yield raw.decode("utf-8") if needs_decode else raw


def get_filename(path):
    """
    Return the file name with both the directory part and the extension removed.
    """
    base = os.path.basename(path)
    stem, _ext = os.path.splitext(base)
    return stem


# model


def freeze_to_layer_by_name(model, layer_name, exclude_layers=None):
    """
    Freeze parameters from index 0 up to and including the first parameter
    whose name contains `layer_name` (substring match, closed interval);
    everything after stays trainable, as do parameters matching any name in
    `exclude_layers`. layer_name == "all" freezes every parameter; a falsy
    layer_name is a no-op.

    Fix: the mutable default argument `exclude_layers=[]` was replaced with
    None (shared-list default anti-pattern).
    """
    if not layer_name:
        return
    exclude_layers = exclude_layers or []

    # state_dict may not equal to named_parameters.
    keys = [i[0] for i in model.named_parameters()]
    if layer_name == "all":
        index_start = len(keys)
    else:
        index_start = -1
        for index, key in enumerate(keys):
            if layer_name in key:
                index_start = index
                break

    # parameter indices that must stay trainable regardless of position
    exclude_idxs = set()
    for name in exclude_layers:
        for index, key in enumerate(keys):
            if name in key:
                exclude_idxs.add(index)

    if index_start < 0:
        print(f"Don't find layer name: {layer_name}")
        print(f"must in : \n{keys}")
        return

    grad_nums = 0
    for index, i in enumerate(model.parameters()):
        if index > index_start or index in exclude_idxs:
            i.requires_grad = True
            grad_nums += 1
        else:
            i.requires_grad = False

    print(f"freeze layers num: {index_start}, active layers num: {grad_nums}.")


def get_parameter_number(net, optimizer=None):
    """
    Count model parameters.

    Returns {"Total": all parameters, "Trainable": parameters with
    requires_grad — or, when an optimizer is given, the parameters registered
    in its param_groups}.
    """
    total_num = sum(p.numel() for p in net.parameters())

    if optimizer:
        trainable_num = sum(
            p.numel() for group in optimizer.param_groups for p in group["params"]
        )
    else:
        trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)

    return {"Total": total_num, "Trainable": trainable_num}


# func
def parameters_combinations(paras: dict):
    """
    Cartesian product of parameter value lists.

    {"a": [1, 2], "b": [3]} -> [{"a": 1, "b": 3}, {"a": 2, "b": 3}]
    """
    names = list(paras)
    value_lists = (paras[n] for n in names)
    return [dict(zip(names, combo)) for combo in itertools.product(*value_lists)]

def time_now(fotmat="%Y-%m-%d %H:%M:%S"):
    """Return the current local time formatted with `fotmat`."""
    return datetime.now().strftime(fotmat)


def split_train_dev_test(items, ratio=None, seed=123):
    """
    Shuffle `items` in place (seeded) and split by `ratio`, largest share first.

    ratio: 2 or 3 fractions summing to 1; defaults to (0.7, 0.2, 0.1).
    Returns (train, test) or (train, dev, test) slices.
    Raises ValueError when len(ratio) is not 2 or 3.

    Fix: the old signature used a mutable default list and called
    ratio.sort(), mutating the caller's list (and the shared default). The
    ratio is now copied and sorted non-destructively. NOTE: `items` is still
    shuffled in place, as before.
    """
    ratio = [0.7, 0.2, 0.1] if ratio is None else list(ratio)
    assert abs(sum(ratio) - 1.0) < 1e-9
    ratio = sorted(ratio, reverse=True)
    random.seed(seed)
    random.shuffle(items)
    if len(ratio) == 2:
        cut = int(len(items) * ratio[0])
        return items[:cut], items[cut:]
    if len(ratio) == 3:
        cut1 = int(len(items) * ratio[0])
        cut2 = int(len(items) * (ratio[0] + ratio[1]))
        return items[:cut1], items[cut1:cut2], items[cut2:]
    raise ValueError("ratio长度不对")


def tokenized_to_device(res_dict, DEVICE):
    """
    Move every tensor in a huggingface tokenizer output dict to DEVICE
    (tokenizer results are not placed on cuda by default).
    """
    return {key: tensor.to(DEVICE) for key, tensor in res_dict.items()}


def _safe_division(numerator, denominator):
    """Divide, returning 0 when the denominator is (nearly) zero."""
    return 0 if abs(denominator) < 1e-9 else numerator / denominator


def try_pop_keys(item, keys):
    """Remove each key in `keys` from dict `item` when present (in place); return item."""
    for key in keys:
        item.pop(key, None)
    return item


SENT_SPLIT = " @_s!@ "


def to_sent(sp=SENT_SPLIT, return_list=False):
    """
    Build a spacy-based sentence splitter.

    Returns a closure `_do(text, lemma=False)` giving (token_count, sents):
    sents is a list of sentence strings when `return_list` is true, otherwise
    one string joined by `sp`. On a spacy failure the error is printed and
    (0, []) is returned.
    """
    import spacy

    nlp = spacy.load("en_core_web_sm")

    def _do(text, lemma=False):
        sents = []
        desc_len = 0
        try:
            parsed = [
                [t.lemma_ if lemma else t.text for t in sent if t.text != "\n"]
                for sent in nlp(text).sents
            ]
            desc_len = sum(len(tokens) for tokens in parsed)
            sents = [" ".join(tokens) for tokens in parsed]
            if not return_list:
                sents = sp.join(sents)
        except Exception as e:
            print(text, e)

        return desc_len, sents

    return _do


def get_logger(path="logs.log", logger_name=__name__, filemode="a"):
    """
    Configure file logging (WARN level) and return a named logger.

    usage:
        logger = get_logger()
        logger.info()

    Fix: the log directory is now created *before* logging.basicConfig opens
    the file (previously basicConfig failed when the directory was missing),
    and a bare filename like the default "logs.log" no longer crashes
    os.makedirs with an empty dirname.
    """
    import logging

    dirname = os.path.dirname(path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    logging.basicConfig(
        level=logging.WARN,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        filename=path,
        filemode=filemode,
        force=True,  # replace any previously installed handlers
    )
    return logging.getLogger(logger_name)


def merge_dict(*dicts, mode="add"):
    """
    Merge several dicts into one.

    mode="add":   duplicate keys have their values combined with `+=`
    mode="first": the first occurrence of a key wins
    mode="last":  the last occurrence of a key wins
    An unknown mode prints a colored warning and falls back to "first".
    """
    if mode not in ["add", "first", "last"]:
        print(colorful(f"mode: {mode} NOT in [add, first, last], default is `first`!"))
        mode = "first"
    merged = {}
    for d in dicts:
        for key, value in d.items():
            if mode == "add":
                if key in merged:
                    merged[key] += value
                else:
                    merged[key] = value
            elif mode == "first":
                merged.setdefault(key, value)
            else:
                merged[key] = value
    return merged


# pandas


def reduce_mem_usage(df, verbose=True):
    """
    Downcast each numeric column of a pandas DataFrame to the smallest
    int/float dtype whose range covers the column's min/max, and report the
    memory saved.

    NOTE(review): downcasting to float16 loses precision (~3 significant
    decimal digits); confirm that is acceptable for the columns involved.
    """
    start_mem = df.memory_usage().sum() / 1024 ** 2
    # only these dtypes are candidates for downcasting
    numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == "int":
                # np.iinfo / np.finfo expose the representable range per dtype
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if (
                    c_min > np.finfo(np.float16).min
                    and c_max < np.finfo(np.float16).max
                ):
                    df[col] = df[col].astype(np.float16)
                elif (
                    c_min > np.finfo(np.float32).min
                    and c_max < np.finfo(np.float32).max
                ):
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024 ** 2
    if verbose:
        print("memory usage after optimization is: {:.2f} MB".format(end_mem))
        print("decrease by {:.1f}%".format(100 * (start_mem - end_mem) / start_mem))
    return df


# text / string helpers
def replace_ignorecase(text, _from, _to):
    """
    Case-insensitively replace every *literal* occurrence of `_from` in `text`.

    Fix: `_from` is now escaped with re.escape and `_to` supplied via a
    callable, so regex metacharacters in `_from` (e.g. "a.c") and backslash
    template sequences in `_to` (e.g. "\\1") are treated as plain text —
    matching the literal-replacement semantics of the sibling `replaces`.
    """
    pattern = re.escape(str(_from))
    return re.sub(pattern, lambda _m: str(_to), str(text), flags=re.I)


def replaces(text, maps, ignorecase=False):
    """
    Apply every {old: new} substitution in `maps` to `text`, in dict order.

    ignorecase=True delegates each substitution to replace_ignorecase.
    """
    for old, new in maps.items():
        if ignorecase:
            text = replace_ignorecase(text, old, new)
        else:
            text = text.replace(old, new)
    return text


def colorful(text, color="yellow"):
    """
    Wrap `text` in ANSI escape codes for terminal highlighting.

    Supported colors: yellow, grey, green; any other value returns the text
    unchanged. Code layout is display;foreground;background —
    display: 0 default, 1 bright, 4 underline, 5 blink, 7 reverse;
    foreground 30-37 / background 40-47: black, red, green, yellow, blue,
    magenta, cyan, white.
    """
    prefixes = {
        "yellow": "\033[1;33;33m",
        "grey": "\033[1;30;47m",
        "green": "\033[1;32;32m",
    }
    prefix = prefixes.get(color)
    if prefix is None:
        return text
    return prefix + text + "\033[0m"


# demo
def make_args():
    """
    Example of command-line argument parsing.

    Note: set_defaults(load_model=True) overrides the store_true action's
    False default, so args.load_model ends up True whether or not the flag
    is passed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", default="data", type=str, help="数据路径")
    parser.add_argument("--load_model", action="store_true", help="是否加载")
    parser.set_defaults(load_model=True)
    return parser.parse_args()


# multiple parameters with multiprocessing


def _parse_one(_tuple):
    """Tag each dict in the list with its key under "ttt" and wrap as {key: list}."""
    key, records = _tuple
    for record in records:
        record["ttt"] = key
    return {key: records}


def multi_demo():
    """
    Demo: use functools.partial to pre-bind arguments of the worker function,
    so a multiprocessing pool mapper can effectively take multiple parameters.
    """

    def _gen():
        # two toy (key, records) pairs fed to the pool
        inputs = {"asd1": [{"a": 1230}], "asd2": [{"a": 1230}]}
        for i in inputs.items():
            yield i

    # debug
    for i in _gen():
        y = _parse_one(i)

    import functools
    from multiprocessing import Pool, cpu_count

    pool = Pool(cpu_count())
    # NOTE(review): partial is called with no bound arguments here, so mapper
    # is just _parse_one; in real use bind extras via partial(_parse_one, y=...).
    mapper = functools.partial(_parse_one)

    # imap preserves input order; the bar shows the various tqdm postfix APIs
    pbar = tqdm(total=4, ncols=80)
    res = []
    for r in pool.imap(mapper, _gen()):
        res.append(r)
        pbar.update(1)
        pbar.set_description_str(f"qweqwe")
        pbar.set_postfix(ordered_dict={"k": 1})
        pbar.set_postfix_str(s="")
    print(res)


def _tokenize_to_file(args, i, num_process, in_path, out_path, line_fn):
    """
    Worker `i` of `num_process`: process every num_process-th line of
    `in_path` with `line_fn` and write the resulting bytes to
    "<out_path>_split<i>".

    NOTE(review): `tokenizer` is not defined anywhere in this file — this
    helper assumes a module-level `tokenizer` global exists; confirm before
    use or it will raise NameError.
    """
    with open(in_path, "r", encoding="utf-8") as in_f, open(
        "{}_split{}".format(out_path, i), "wb"
    ) as out_f:
        for idx, line in enumerate(in_f):
            # shard by line number: worker i only handles idx % num_process == i
            if idx % num_process != i:
                continue
            out_f.write(line_fn(args, line, tokenizer))


def multi_demo_2(args, num_process=8):
    """
    Multiprocessing demo where each worker knows its own process index.

    Lines are assigned by index modulo num_process instead of splitting the
    file into chunks; worker return values are not collected (use a Queue or
    a Pool if results are needed).

    NOTE(review): `in_path`, `out_path` and `line_fn` are undefined in this
    scope — they must exist as globals for this demo to run.
    """
    from multiprocessing import Process

    processes = []
    for i in range(num_process):
        p = Process(
            target=_tokenize_to_file,
            args=(args, i, num_process, in_path, out_path, line_fn),
        )
        processes.append(p)
        p.start()
    # wait for every worker to finish
    for p in processes:
        p.join()


# wrapper


def timer(func):
    """
    Decorator that prints how long `func` took to run and returns its result.

    Fix: functools.wraps is applied so the wrapper keeps the wrapped
    function's __name__/__doc__ (the old wrapper hid them).
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        res = func(*args, **kwargs)
        print("time consume: {:.2f}s .".format(time.time() - start))
        return res

    return wrapper


import functools
from concurrent import futures


def timeout(seconds):
    """
    Decorator that aborts a call with concurrent.futures.TimeoutError when it
    runs longer than `seconds` (from https://zhuanlan.zhihu.com/p/355563762).

    The call runs on a single shared worker thread; note the thread itself is
    not killed on timeout, only the waiting caller gives up.
    """
    executor = futures.ThreadPoolExecutor(1)

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            pending = executor.submit(func, *args, **kw)
            return pending.result(timeout=seconds)

        return wrapper

    return decorator


def wc_l(path):
    """
    Count lines in a file via `wc -l`; return None on any failure
    (missing file, missing binary, unparsable output).

    Fix: the argument-list form of check_output replaces the shell=True
    string, so special characters in `path` are no longer shell-interpreted.
    """
    try:
        out = subprocess.check_output(["wc", "-l", path]).decode(encoding="utf-8")
        return int(out.split()[0])
    except (OSError, subprocess.SubprocessError, ValueError, IndexError):
        return None


@timeout(30)
def file_line_count(path):
    # wc-based line count, abandoned after 30s (huge files / hung filesystems)
    return wc_l(path)


# message
@timer
def send_meg_to_wx(sendkey, text="default text", desp="default content"):
    """
    Push a message to WeChat via the ServerChan service
    (https://sct.ftqq.com/).

    Get a send key from ServerChan and export it, e.g.:
        echo "# server key
        export sctapi_sendkey=xxxx
        " >> ~/.bashrc && source ~/.bashrc

    Does nothing when `sendkey` is None; otherwise POSTs `text` (title) and
    `desp` (body) and prints the service response.
    """
    if sendkey is None:
        return
    query_args = {"text": text, "desp": desp}

    # form-encoded POST to the ServerChan send endpoint
    r = request.Request(
        url=f"https://sctapi.ftqq.com/{sendkey}.send",
        data=parse.urlencode(query_args).encode("utf-8"),
    )
    response = request.urlopen(r).read().decode("utf-8")
    print(response)


if __name__ == "__main__":
    # smoke test of the spacy sentence splitter; other demos left disabled
    s = to_sent()
    s("i ate apples.", lemma=True)
    # multi_demo()
    # sendkey = os.environ.get("sctapi_sendkey", None)
    # send_meg_to_wx(sendkey=sendkey, text="测试123", desp="内容123")
    # print(file_line_count("common_utils.py"))
