import json
import base64, os
import time
import os.path as osp
import shutil
import io, math
from requests import auth
import numpy as np
import sys
import cv2
import requests
from requests.auth import HTTPBasicAuth
from hashlib import md5
import hashlib
import zipfile
from tool import filesystem, via_tool

def auth_create_login(ip, uri="/api/auth/login", username="admin", password="lbld@2023"):
    """Log in to the CVAT server and return the session auth key.

    Args:
        ip: "host:port" string of the server.
        uri: login endpoint path.
        username, password: credentials; defaults match this deployment
            (previously hard-coded in the request body).

    Returns:
        The "key" field of the JSON response on HTTP 200, otherwise None.
    """
    url = "http://{}{}".format(ip, uri)

    data = {
        "username": username,
        "password": password,
    }

    res = requests.post(url, json=data)
    if res.status_code != 200:
        return None
    return res.json()["key"]


def api_projects(auth, ip, uri="/api/projects"):
    """List projects visible to the authenticated user.

    Fix: the original performed the request but discarded the result (its
    return statement was commented out), even though the __main__ scaffolding
    assigns `project_id = api_projects(...)`. Now returns the parsed JSON
    body on HTTP 200, None otherwise; debug prints are kept.
    """
    url = "http://{}{}".format(ip, uri)

    res = requests.get(url, auth=auth)
    print(res.status_code)
    print(res.text)

    if res.status_code != 200:
        return None
    return res.json()


def api_tasks(project_id, page, auth, ip, uri="/api/tasks"):
    """Fetch one page of the task list for a project.

    Returns the parsed JSON page (with a "results" list) on HTTP 200,
    otherwise None.
    """
    endpoint = f"http://{ip}{uri}"
    query = {"project_id": project_id, "page": page}

    response = requests.get(endpoint, auth=auth, params=query)
    if response.status_code == 200:
        return response.json()
    return None


def api_tasks_x(task_id, auth, ip, uri="/api/tasks/{}"):
    """Fetch a single task by id.

    Bug fix: the original returned res.json()["key"], copy-pasted from the
    login helper; the single-task endpoint returns the task object itself
    (no "key" field), so the old code raised KeyError on every successful
    response. Return the full parsed JSON, consistent with api_tasks /
    api_jobs_x.

    Returns:
        Parsed JSON task object on HTTP 200, otherwise None.
    """
    url = "http://{}{}".format(ip, uri.format(task_id))

    res = requests.get(url, auth=auth)
    if res.status_code != 200:
        return None
    return res.json()

def api_jobs_x(task_id, auth, ip, uri="/api/jobs"):
    """List the jobs belonging to a task.

    Returns the parsed JSON listing (with a "results" list) on HTTP 200,
    otherwise None.
    """
    endpoint = f"http://{ip}{uri}"
    query = {"task_id": task_id}

    response = requests.get(endpoint, auth=auth, params=query)
    if response.status_code == 200:
        return response.json()
    return None

def api_jobs_download_anno(job_id, auth, ip, uri="/api/jobs/{}/annotations"):
    """Download a job's annotations as "CVAT for video 1.1" and save to disk.

    The file is written under the server-suggested filename from the
    Content-Disposition header, falling back to "job_<id>_annotations.zip"
    when the header is absent (the original indexed the header
    unconditionally and raised KeyError in that case).

    Returns:
        The filename written on success, None on a non-200 response.
    """
    url = "http://{}{}".format(ip, uri.format(job_id))

    data = {
        "action": "download",
        "format": "CVAT for video 1.1",
    }

    res = requests.get(url, auth=auth, params=data)
    print(res.status_code, "\n")
    print(res.headers, "\n")
    print(res.content, "\n")
    if res.status_code != 200:
        return None

    # Robustness fix: don't assume Content-Disposition is present.
    disposition = res.headers.get("Content-Disposition", "")
    if "filename=\"" in disposition:
        save_name = disposition.split("filename=\"")[-1].split("\"")[0]
    else:
        save_name = "job_{}_annotations.zip".format(job_id)
    print("save_name:", save_name)
    with open(save_name, "wb") as wf:
        wf.write(res.content)
    return save_name


def api_jobs_download_dataset(save_path, job_id, auth, ip, uri="/api/jobs/{}/dataset"):
    """Download a job's dataset export ("CVAT for video 1.1") to save_path.

    Returns:
        0 on success (zip written to save_path);
        -1 when save_path already exists, or when the server answers with a
        non-200 status (e.g. while the export is presumably still being
        prepared server-side — the caller retries in that case).
    """
    if osp.exists(save_path):
        # Fixed message typo: was "exit save_path".
        print("exists save_path:", save_path)
        return -1

    url = "http://{}{}".format(ip, uri.format(job_id))
    data = {
        "action": "download",
        "format": "CVAT for video 1.1",
    }

    res = requests.get(url, auth=auth, params=data)
    print("api_jobs_download_dataset:", res.status_code, "\n")
    print("res.content:", len(res.content), "\n")
    if res.status_code != 200:
        return -1

    print("save_path:", save_path)
    with open(save_path, "wb") as wf:
        wf.write(res.content)
    return 0

def unzip_file(zip_path, save_dir):
    """Extract every member of the archive at zip_path into save_dir.

    Uses a context manager so the archive handle is closed even if
    extractall raises (the original leaked the handle on error).
    """
    with zipfile.ZipFile(zip_path) as archive:
        archive.extractall(save_dir)
    # TODO: fix filename encoding of extracted members (original note, translated).


def auto_download_dataset(save_dir, project_id, auth, ip):
    """Download and unzip the dataset of every completed job in a project.

    Pages through the task list until an empty "results" page; for each task
    the first job is inspected and, when its status is "completed", its
    dataset zip is fetched into save_dir/<job_id>/<job_id>.zip (retrying
    every 2 s until the server returns the archive) and extracted in place.

    Fixes over the original:
      * a task with zero jobs no longer raises IndexError on results[0];
      * a save_path that already exists no longer causes an infinite retry
        loop (api_jobs_download_dataset returns -1 for existing files, so a
        re-run of this function used to hang forever on the first job);
      * pagination gives up after several consecutive failed pages instead
        of spinning forever when the server keeps erroring.
    """
    if not osp.exists(save_dir):
        os.makedirs(save_dir)
    page_idx = 0
    failed_pages = 0
    while True:
        page_idx += 1
        page_data = api_tasks(project_id, page_idx, auth, ip)
        if page_data is None:
            # NOTE(review): a page past the end may also come back as an
            # error; bail out after repeated failures rather than loop forever.
            failed_pages += 1
            if failed_pages >= 5:
                break
            continue
        failed_pages = 0
        if len(page_data["results"]) == 0:
            break

        for result in page_data["results"]:
            task_id = result["id"]
            job_infos = api_jobs_x(task_id, auth, ip)
            if job_infos is None:
                continue
            # Guard: tasks can report zero jobs.
            if not job_infos["results"]:
                continue

            job_info = job_infos["results"][0]
            if job_info["status"] != "completed":
                continue

            job_id = job_info["id"]
            cur_dir = osp.join(save_dir, str(job_id))
            if not osp.exists(cur_dir):
                os.makedirs(cur_dir)
            save_path = osp.join(cur_dir, "{}.zip".format(job_id))
            # Skip the download if the zip is already on disk (re-run case);
            # otherwise retry until the export is ready.
            if not osp.exists(save_path):
                while api_jobs_download_dataset(save_path, job_id, auth, ip) != 0:
                    time.sleep(2)

            unzip_file(save_path, cur_dir)


if __name__ == "__main__":

    # Basic-auth credentials and host of the CVAT deployment.
    auth = HTTPBasicAuth("admin", "lbld@2023")
    ip = "112.31.80.65:23080"

    # --- usage examples (disabled) ------------------------------------
    # Token login instead of basic auth:
    #   auth_key = auth_create_login(ip)
    #   print(auth_key)
    #
    # Inspect projects / tasks / jobs:
    #   api_projects(auth, ip)
    #   api_tasks(3, 2, auth, ip)          # project_id=3, page=2
    #   api_tasks_x(239, auth, ip)         # task_id=239
    #   api_jobs_x(239, auth, ip)
    #
    # Download artifacts for one job:
    #   api_jobs_download_anno(189, auth, ip)          # job_id=189
    #   api_jobs_download_dataset(189, auth, ip)
    #
    # Bulk-unzip previously downloaded archives:
    #   zip_dir = "/media/dataset/road_vihicle/images"
    #   for zip_path in filesystem.get_all_filepath(zip_dir, [".ZIP", ".zip"]):
    #       print(zip_path)
    #       unzip_file(zip_path, osp.dirname(zip_path))
    #
    # Full pipeline: fetch every completed job of a project:
    #   save_dir = sys.argv[1]             # e.g. "cvat_data"
    #   auto_download_dataset(save_dir, 3, auth, ip)



