import json
import os
import logging
import sys
import zipfile
import oss2
import conf
import requests

logger = logging.getLogger(__name__)


def upload_to_oss(path, dir_name='', file_type='model'):
    """Upload a local file to OSS with resumable upload.

    :param path: local file path; must exist, otherwise '' is returned.
    :param dir_name: optional sub-directory inserted into the object key.
    :param file_type: key prefix category, the key starts with '<file_type>-sdk'.
    :return: public URL of the uploaded object, or '' when the file is missing.
    """
    if not os.path.exists(path):
        print("文件不存在，建议如果当前文件夹使用: os.path.join(os.path.abspath('.'), 'best.pt')")
        return ''
    file_name = os.path.basename(path)
    key = f'{file_type}-sdk'
    if dir_name:
        key += f'/{dir_name}'
    # BUG FIX: the original only appended the file name when dir_name was set,
    # so uploads without a dir_name all targeted the bare '<file_type>-sdk' key.
    key += f'/{file_name}'
    # Construct the client only after the local file is known to exist.
    oss = Oss()
    url = oss.resumable_upload(key=key, filename=path)
    return url


def get_label_dataset(project_id, email=None, export_type: str = None, token=None, dist_path=None) -> (str, str, str):
    """Download a Label Studio project's annotation export to a local directory.

    :param project_id: Label Studio project id.
    :param email: used to fetch an API token when *token* is not supplied.
    :param export_type: export format name (case-insensitive); when omitted the
        project's default format is derived from its label config.
    :param token: Label Studio API token.
    :param dist_path: download directory, defaults to /tmp/project_<project_id>.
    :return: (annotation path, image path, file extension); ('', '', '') on failure.
    :raises ValueError: when the project info request fails.
    """
    dist_path = dist_path if dist_path else f'/tmp/project_{project_id}'
    if not token and email:
        token = _label_studio_api_login(email)
    headers = {'Authorization': f'token {token}'}

    # Resolve the default export format from the project's label config.
    if not export_type:
        project_url = conf.LABEL_STUDIO_HOST + f"/api/projects/{project_id}"
        resp = requests.get(project_url, headers=headers)
        if resp.status_code != 200:
            # BUG FIX: `raise '<str>'` is a TypeError in Python 3 — raise a real exception.
            raise ValueError('获取数据集信息失败，请确认project_id是否正确')
        export_type = get_default_export_type(json.loads(resp.content))
        print(f'默认导出格式为：{export_type}')
    export_type = export_type.upper()
    if not ExportType.check_type(export_type):
        print('导出类型不支持')
        return '', '', ''
    export_url = conf.LABEL_STUDIO_HOST + f"/api/projects/{project_id}/export?export_type={export_type}"
    resp = requests.get(export_url, headers=headers)
    image_path, file_extension = '', ''
    if resp.status_code == 200:
        # NOTE(review): relies on the server sending a 'filename' response header — verify.
        file_name = resp.headers['filename']
        dir_name = _mkdir(dist_path)
        path = f'{dir_name}/{file_name}'

        with open(path, 'wb') as f:
            f.write(resp.content)

        root, file_extension = os.path.splitext(file_name)
        if file_extension == '.zip':
            _unzip(path, dir_name)
            # Prefer the VOC-style Annotations sub-directory when present.
            annotations_path = f'{dir_name}/Annotations'
            path = annotations_path if os.path.exists(annotations_path) else dir_name
            # BUG FIX: the original checked os.path.exists(image_path) while
            # image_path was still '', so the images directory was never reported.
            candidate_images = f'{dist_path}/images'
            image_path = candidate_images if os.path.exists(candidate_images) else ''
        return path, image_path, file_extension
    return '', '', ''


def get_dataset_path(project_id, email=None, token=None, dist_path=None):
    """Download a project's image archive via its OSS url and unzip it locally.

    :param project_id: Label Studio project id.
    :param email: used to fetch an API token when *token* is not supplied.
    :param token: Label Studio API token.
    :param dist_path: download directory, defaults to /tmp/project_<project_id>.
    :return: local images directory, or '' on failure.
    """
    project_url = conf.LABEL_STUDIO_HOST + f"/aipark/api/{project_id}/dataset/oss_url"
    dist_path = dist_path if dist_path else f'/tmp/project_{project_id}'
    if not token and email:
        token = _label_studio_api_login(email)
    headers = {
        'Authorization': f'token {token}'
    }
    resp = requests.get(project_url, headers=headers)
    if resp.status_code != 200:
        print(resp.status_code, resp.content)
        return ''
    image_path = ''
    url = resp.json().get('oss_url')
    file_name = os.path.basename(url)
    resp = requests.get(url, headers=headers)
    if resp.status_code == 200:
        dir_name = _mkdir(dist_path)
        path = f'{dir_name}/{file_name}'
        # BUG FIX: the original `open(path).write(...)` never closed the handle;
        # use a context manager so the archive is flushed before unzipping.
        with open(path, mode='wb') as f:
            f.write(resp.content)
        image_path = _mkdir(f'{dir_name}/images')
        _unzip(path, image_path)
    return image_path


def get_project_list(page_num=1, page_size=100, email: str = ''):
    """Fetch one page of projects from the AI backend.

    :param page_num: 1-based page index.
    :param page_size: records per page.
    :param email: accepted for interface compatibility; not used in the request.
    :return: dict with 'total', 'size' and simplified 'records', or {} on failure.
    """
    url = conf.AI_BACKEND_HOST + '/dataset/page'
    payload = {'pageNum': page_num, 'pageSize': page_size}
    resp = requests.post(url, json=payload)
    if resp.status_code != 200:
        return {}
    body = resp.json()
    records = body.get('data', {}).get('records', [])
    if not body.get('result', False) or not records:
        return {}
    return {
        'total': body['data']['total'],
        'size': body['data']['size'],
        'records': [
            {'id': rec['id'], 'project_id': rec['labelStudioProjectId'], 'name': rec['name']}
            for rec in records
        ],
    }


def _label_studio_api_login(email):
    """Log in to Label Studio with *email* and return the API token (None on failure)."""
    response = requests.post(conf.LABEL_STUDIO_HOST + '/api/login', json={'email': email})
    if response.status_code != 200:
        return None
    return response.json().get('token')


def _zip_file(zip_file_name: str, file_names: list):
    """
    将多个文件夹中文件压缩存储为zip
    :param zip_file_name:   /root/Document/test.zip
    :param file_names:      ['/root/user/doc/test.txt', ...]
    :return:
    """
    with zipfile.ZipFile(zip_file_name, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
        for fn in file_names:
            parent_path, name = os.path.split(fn)
            zf.write(fn, arcname=name)


def _unzip(zip_file_path: str, dst_path: str):
    with zipfile.ZipFile(zip_file_path, mode='r') as zf:
        zf.extractall(dst_path)


def _mkdir(dir_name: str):
    if not os.path.isdir(dir_name):
        os.makedirs(dir_name)
    return dir_name


class ExportType:
    """Supported Label Studio export format names, mapped to the file
    extension the export endpoint delivers them with."""
    JSON = 'json'
    JSON_MIN = 'json'
    CSV = 'csv'
    TSV = 'csv'
    CONLL2003 = 'conll'
    COCO = 'zip'
    VOC = 'zip'
    BRUSH_TO_NUMPY = 'npy'
    BRUSH_TO_PNG = 'png'
    ASR_MANIFEST = ''
    YOLO = 'zip'

    @classmethod
    def check_type(cls, s):
        """Return True when *s* names one of the upper-case format constants.

        BUG FIX: the original `hasattr(ExportType, s)` also matched methods
        and dunder attributes (e.g. check_type('check_type') was True); it
        also hard-coded the class name instead of using *cls*.
        """
        return isinstance(s, str) and s.isupper() and s in vars(cls)


def get_default_export_type(project) -> str:
    """
    Derive the default export format from a project's parsed label config.

    Object Detection with Bounding Boxes 目标检测   parsed_label_config.label.type=RectangleLabels      VOC
    Semantic Segmentation with masks 语义分割       parsed_label_config.tag.type=BrushLabels            PNG
    Semantic Segmentation with Polygons 语义分割（多边形）parsed_label_config.label.type=PolygonLabels   COCO
    Image Classification 图像分类                   parsed_label_config.choice.type=Choices             CSV
    Text Classification 文本分类                    parsed_label_config.sentiment.type=Choices          CSV
    Names Entity Recognition（NER）命名实体识别      parsed_label_config.label.type=Labels               CSV
    Relation Extraction关系抽取(NER+分类)           parsed_label_config.label.type=Labels                CSV
    :param project: project dict containing 'parsed_label_config'.
    :return: export format name, e.g. 'VOC'.
    :raises ValueError: when the config is missing or matches no known task.
    """
    config = project.get('parsed_label_config', None)
    if not config:
        # BUG FIX: `raise '<str>'` is a TypeError in Python 3 — raise a real exception.
        raise ValueError('数据集不合法，无法获得此数据集的默认导出格式')
    # Order matters: 'label' is probed for RectangleLabels before PolygonLabels
    # before plain Labels.
    if config.get('label', {}).get('type', '') == 'RectangleLabels':
        return 'VOC'
    elif config.get('tag', {}).get('type', '') == 'BrushLabels':
        return 'PNG'
    elif config.get('label', {}).get('type', '') == 'PolygonLabels':
        return 'COCO'
    elif config.get('choice', {}).get('type', '') == 'Choices':
        return 'CSV'
    elif config.get('sentiment', {}).get('type', '') == 'Choices':
        return 'CSV'
    elif config.get('label', {}).get('type', '') == 'Labels':
        return 'CSV'
    else:
        raise ValueError('无法获得此数据集的默认导出格式')


class Oss:
    """Thin wrapper around an Aliyun OSS bucket for resumable file uploads."""

    def __init__(self):
        # NOTE: the account AccessKey grants access to every API and is high
        # risk; prefer creating a RAM sub-user credential in the RAM console.
        self.auth = oss2.Auth(conf.OSS_ACCESS_KEY_ID, conf.OSS_ACCESS_KEY_SECRET)
        # The endpoint must match the bucket's region, e.g.
        # https://oss-cn-hangzhou.aliyuncs.com for East China 1 (Hangzhou).
        self.bucket = oss2.Bucket(self.auth, 'https://' + conf.OSS_ENDPOINT, conf.OSS_BUCKET_NAME)

    def percentage(self, consumed_bytes, total_bytes):
        """Upload progress callback; total_bytes is None when the size is unknown."""
        if total_bytes:
            rate = int(100 * (float(consumed_bytes) / float(total_bytes)))
            logger.info(f'upload percent:{rate}')
            sys.stdout.flush()

    def resumable_upload(self, key, filename):
        """Upload *filename* to object *key* and return its public https URL.

        :param key: full object path without the bucket name,
            e.g. exampledir/exampleobject.txt.
        :param filename: local file path to upload.

        Checkpoint info is stored under ~/.py-oss-upload unless `store` is
        supplied. num_threads must not exceed
        oss2.defaults.connection_pool_size (defaults to 1 thread otherwise).
        multipart_threshold: files of at least this size use multipart upload.
        """
        # BUG FIX: part_size must be within 100 KB .. 5 GB (per the oss2
        # documentation the original comment quoted); the original passed
        # 2 KB, which is below the documented minimum.
        oss2.resumable_upload(
            self.bucket, key=key, filename=filename, multipart_threshold=100 * 1024,
            part_size=100 * 1024, progress_callback=self.percentage, num_threads=4
        )
        return f"https://{conf.OSS_BUCKET_NAME}.{conf.OSS_ENDPOINT}/{key}"
