import requests, os, time
import zipfile



def add_task(task_id, itemArrs):
    """Add a task to the task store. Placeholder — not yet implemented."""

def get_task(size):
    """Fetch up to *size* pending tasks. Placeholder — not yet implemented."""

def is_data_exists(task_id, itemArrs):
    """Check whether the data is a duplicate. Placeholder — not yet implemented."""

def save_data(task_id, resList):
    """Persist the results of a task. Placeholder — not yet implemented."""


def log(path, content, root="./log/"):
    """Append a timestamped *content* line to the log file at ``root + path``.

    Also prints a short preview (first 40 chars) to stdout. Parent
    directories of the target file are created on demand.

    path: log file path, relative to *root*.
    content: str, or list of str (joined without a separator).
    root: directory prefix prepended to *path*.
    """
    if isinstance(content, list):
        content = "".join(content)
    # Console preview, truncated to 40 characters.
    print("log", "-" * 10, content[:40])
    record = time.strftime("%Y-%m-%d %H-%M-%S -- ", time.localtime()) + content
    target = root + path
    parent = os.path.split(target)[0]
    # exist_ok=True avoids the race between the existence check and the
    # creation that the original's exists()/makedirs() pair had; the guard
    # handles targets with no directory component.
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(target, "a", encoding="utf-8") as f:
        f.write("\n")
        f.write(record)


def to_zip(start_dir, outpath):
    """Zip the contents of *start_dir* (recursively) into *outpath*.

    Archive entries are stored relative to *start_dir*, so extracting the
    archive reproduces the folder's contents without the leading path.
    Parent directories of *outpath* are created on demand.

    Returns *outpath*.
    """
    parent = os.path.split(outpath)[0]
    if parent:
        os.makedirs(parent, exist_ok=True)
    # `with` guarantees the archive is closed (and its central directory
    # written) even if a file read fails mid-walk.
    with zipfile.ZipFile(outpath, "w", zipfile.ZIP_DEFLATED) as zf:
        for dir_path, _dir_names, file_names in os.walk(start_dir):
            for filename in file_names:
                full = os.path.join(dir_path, filename)
                # relpath is safer than the original str.replace(start_dir, ''):
                # replace() would also rewrite accidental occurrences of
                # start_dir deeper inside the path.
                zf.write(full, os.path.relpath(full, start_dir))
    return outpath


def walk(root):
    """Yield ``(full_path, filename)`` for every file under *root*.

    Directories are traversed with os.walk; within each directory the
    filenames are yielded in sorted order.
    """
    for dirpath, _subdirs, filenames in os.walk(root):
        for name in sorted(filenames):
            yield os.path.join(dirpath, name), name


def ex(path):
    """Return whether *path* exists (prints its absolute form as a trace).

    On an unexpected error the error and path are printed and 0 is returned.
    """
    result = 0
    try:
        path = os.path.abspath(path)
        # Trace print kept from the original implementation.
        print(path)
        result = os.path.exists(path)
    except Exception as err:
        print(err, path)
    return result


def mk(path):
    """Ensure the parent directory of file *path* exists, creating it if needed.

    Errors (e.g. permission problems) are printed rather than raised,
    matching this file's best-effort convention.
    """
    root = os.path.split(path)[0]
    # A bare filename has no directory component; makedirs('') would raise.
    if not root:
        return
    try:
        # exist_ok=True avoids the check-then-create race of the original.
        os.makedirs(root, exist_ok=True)
    except Exception as e:
        print(e, root)


def get_html(url, headers=""):
    """GET *url* and return its body decoded as GBK text.

    headers: optional dict of request headers; when given, the request is
    also routed through a hard-coded HTTPS proxy
    (NOTE(review): verify this proxy address is still valid/desired).

    Bug fixed: in Python 3 ``Response.text`` is already ``str``; the
    original called ``.decode("gbk")`` on it, which raises AttributeError.
    Setting ``Response.encoding`` makes ``.text`` decode as GBK.
    """
    if headers:
        proxies = {'https': "https://222.95.144.129:3000"}
        html = requests.get(url, headers=headers, proxies=proxies)
    else:
        html = requests.get(url)
    html.encoding = "gbk"
    return html.text


def get_html_b(url, headers=""):
    """GET *url* and return the raw response body as bytes.

    headers: optional dict of request headers; omitted when falsy.
    """
    kwargs = {"headers": headers} if headers else {}
    response = requests.get(url, **kwargs)
    return response.content


def write(path, content, t="w", encoding="utf-8"):
    """Write *content* to *path*, creating parent directories as needed.

    path: destination file path.
    content: str for text modes, bytes for binary modes.
    t: open() mode, e.g. "w", "a", "wb".
    encoding: applied to every text mode. (Bug fixed: the original only
    honoured *encoding* for t == "w", so append mode "a" silently used the
    platform default encoding.)

    Directory-creation errors are printed, not raised, matching this
    file's best-effort convention.
    """
    root = os.path.split(path)[0]
    if root and not os.path.exists(os.path.abspath(root)):
        try:
            os.makedirs(root)
        except Exception as e:
            print(e, root)

    if "b" in t:
        # Binary modes must not receive an encoding.
        with open(path, t) as f:
            f.write(content)
    else:
        with open(path, t, encoding=encoding) as f:
            f.write(content)


def read(path, t="r"):
    """Read and return the contents of *path*; return None on failure.

    t: open() mode ("r" for text, "rb" for bytes). Text modes decode as
    UTF-8. (Bug fixed: the original passed ``encoding`` for every mode,
    so binary reads always raised ValueError and returned None.)

    Errors are printed rather than raised, matching this file's
    best-effort convention.
    """
    try:
        if "b" in t:
            with open(path, t) as f:
                return f.read()
        with open(path, t, encoding="utf-8") as f:
            return f.read()
    except Exception as e:
        print(e, path)


def down_file(url, path, headers):
    """Download *url* to *path* in binary mode, skipping if *path* exists."""
    if os.path.exists(path):
        return
    response = requests.get(url, headers=headers)
    write(path, response.content, "wb")


if __name__ == "__main__":
    # Manual smoke-test entry point; currently does nothing.
    # for name,path in walk("/"):
    #     print(name)
    pass



