import json
import logging
import os
import time
import urllib
import urllib.parse
import urllib.request
import zipfile

import requests

# Module-level logger: INFO and above is appended to 'importFile.py.log'
# in the current working directory.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# NOTE(review): the handler is attached unconditionally — importing this
# module twice (or re-running in a REPL) would add a second FileHandler
# and duplicate every log line; consider guarding with `if not logger.handlers`.
handler1 = logging.FileHandler('importFile.py.log')
handler1.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler1.setFormatter(formatter)
logger.addHandler(handler1)


def getFileType(suffix):
    """
    Map a file suffix to the import service's dataConnectionType code.

    :param suffix: file extension without the dot, e.g. "zip", "shp", "dwg"
    :return: 0x80000001 (2147483649) for zip/shp, 1 for dwg
    :raises ValueError: for any unsupported suffix
    """
    if suffix in ("zip", "shp"):
        return 0x80000001
    if suffix == "dwg":
        return 1
    # ValueError (a subclass of Exception) keeps existing `except Exception`
    # callers working, and replaces the original two-argument Exception whose
    # message rendered as a tuple ('不支持的文件类型,', suffix).
    raise ValueError("不支持的文件类型,%s" % suffix)


def importVectorFileWithAttachment(host, fileInfo, pgConn, dataSetName, files):
    """
    Import vector data into the new environment via POST /api/data/import.

    :param host: service base URL, e.g. "http://server:port"
    :param fileInfo: dict with keys 'suffix', 'model', 'async', 'feedback',
                     'fileId', 'featureName', 'downloadUrl', 'fileName'
    :param pgConn: dict with PostgreSQL connection keys
                   ('host', 'port', 'user', 'password', 'database')
    :param dataSetName: target dataset name on the server
    :param files: multipart file tuples passed straight to requests.post
    :return: the requests.Response from the import endpoint
    """
    conn_param = {
        "server": pgConn.get('host'),
        "port": pgConn.get('port'),
        "userName": pgConn.get('user'),
        "password": pgConn.get('password'),
        "dataBase": pgConn.get('database'),
        "connType": 5,
    }
    import_param = {
        "connParam": conn_param,
        "dataSetName": dataSetName,
        "offSetX": 0,
        "offSetY": 0,
        "offSetZ": 0,
        "dataConnectionType": getFileType(fileInfo.get('suffix')),  # e.g. 2147483649 for shp/zip
        "model": fileInfo.get('model'),
        "asyncImport": fileInfo.get('async'),
        "feedbackUrl": fileInfo.get('feedback'),
        "fileID": fileInfo.get('fileId'),
        "featureClassName": fileInfo.get('featureName'),
        "filePath": fileInfo.get('downloadUrl'),
        "fileName": fileInfo.get('fileName'),
        "fieldList": [
            {"name": "cim_layer_code", "type": 7},
            {"name": "cim_district_code", "type": 7},
        ],
    }

    endpoint = "%s/api/data/import" % host
    query = {'importDataParam': json.dumps(import_param, ensure_ascii=False)}
    response = requests.post(endpoint, params=query, files=files)
    logger.info("入参=%s,返回=%s" % (import_param, response.text))
    return response


def costTime(seconds):
    """Format a duration in seconds (int or float) as an HH:MM:SS string."""
    total = int(seconds)
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return "{:02d}:{:02d}:{:02d}".format(hours, minutes, secs)


def prepareShpData(path, prefix=None):
    """
    Walk *path* and collect shapefile bundles.

    :param path: directory to scan recursively
    :param prefix: optional name prefix; when given it replaces the
                   containing-directory name in the generated layer name
    :return: list of (name, [shp, dbf, prj, shx]) tuples, where name is
             "<prefix or parent-dir>_<file stem>"
    """
    lst = []
    for root, dirs, files in os.walk(path):
        for file in files:
            if not file.endswith(".shp"):
                continue
            stem = file[:-4]
            if prefix:
                name = prefix + '_' + stem
            else:
                # Was root.split("\\")[-1]: only worked with Windows
                # separators; basename is portable across platforms.
                name = os.path.basename(root) + '_' + stem
            base = os.path.join(root, stem)
            # Sidecar files are assumed to sit next to the .shp with the
            # same stem; missing ones are not checked here.
            lst.append((name, [base + '.shp', base + '.dbf',
                               base + '.prj', base + '.shx']))
    return lst


def testImportVectorWithAttachment(host, path, pgConn, dataSetName):
    """
    Import every shapefile bundle under *path* into *dataSetName*.

    :param host: service base URL
    :param path: directory scanned by prepareShpData
    :param pgConn: PostgreSQL connection dict
    :param dataSetName: target dataset name
    :return: list of requests.Response objects, one per imported bundle
    """
    lst = prepareShpData(path, prefix='taiwan')

    ret = []
    for name, files in lst:
        file1, file2, file3, file4 = files
        fileName = os.path.basename(file1)
        # Timestamp suffix keeps feature class names unique per run.
        featureName = name + '_' + time.strftime('%Y%m%d%H%M%S')
        logger.info('featureClassExist, new name is %s' % featureName)

        fileInfo = {'suffix': 'shp', 'fileId': 0, 'fileName': fileName, 'dataSetName': dataSetName,
                    'featureName': featureName, 'async': False, 'model': 1}

        handles = [open(f, 'rb') for f in (file1, file2, file3, file4)]
        try:
            payload = [('name', h) for h in handles]
            t1 = time.time()
            resp = importVectorFileWithAttachment(host, fileInfo, pgConn, dataSetName, payload)
            ret.append(resp)
            t2 = time.time()
            logger.info("导入耗时,%s,%s" % (costTime(t2 - t1), file1))
        finally:
            # Original closed only on success, leaking handles when the
            # request raised; finally guarantees cleanup either way.
            for h in handles:
                h.close()
    return ret


# def download(url):


def get_current_path():
    """Return the 'data' directory that sits next to this script file."""
    directory = os.path.dirname(__file__)
    print(f"当前文件路径: {directory}")
    return os.path.join(directory, 'data')


def getConnStrNew(pgConn):
    """
    Serialize a PostgreSQL connection dict into the JSON string the
    service's 'connection' query parameter expects (connType fixed at 5).
    """
    # Key order matches the original so the serialized string is identical.
    payload = {
        "type": 5,
        "database": pgConn.get("database"),
        "server": pgConn.get("host"),
        "port": pgConn.get("port"),
        "userName": pgConn.get("user"),
        "password": pgConn.get("password"),
    }
    return json.dumps(payload, ensure_ascii=False)


def isDatasetExist(host, pgConn, featureSetName):
    """
    Ask the service whether *featureSetName* exists.

    :return: True/False on HTTP 200, None on any other status.
    """
    query = {'connection': getConnStrNew(pgConn)}
    endpoint = "%s/api/data/dataset/isExists/%s?" % (host, featureSetName)
    resp = requests.get(endpoint, params=query)
    if resp.status_code != 200:
        return None
    return json.loads(resp.text).get('data') is True


def queryFeatureClass(host, pgConn, featureSetName, featureClassName):
    """
    Fetch a feature class definition from the service.

    :return: the response's 'data' payload on HTTP 200, otherwise None.
    """
    endpoint = "%s/api/data/featureClass/query/%s/%s" % (host, featureSetName, featureClassName)
    body = json.dumps({'connection': getConnStrNew(pgConn), }, ensure_ascii=False)
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json', }
    resp = requests.post(endpoint, data=body, headers=headers)
    if resp.status_code != 200:
        return None
    return json.loads(resp.text).get('data')


def createDataSet(host, pgConn, datasetName):
    """POST a dataset-creation request and log the service's response."""
    endpoint = "%s/api/data/dataset/manage/%s" % (host, datasetName)
    body = json.dumps({'connectionParameter': pgConn}, ensure_ascii=False)
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json', }
    resp = requests.post(endpoint, data=body, headers=headers)
    logger.info("新建数据集,dataSet=%s,%s" % (datasetName, resp.text))


def createDatasetIfNotExists(host, pgConn, datasetName):
    """Create the dataset only when the service reports it absent."""
    if isDatasetExist(host, pgConn, datasetName):
        return
    createDataSet(host, pgConn, datasetName)


def download(file_lst, output):
    """
    Download each URL in *file_lst* into directory *output*.

    :param file_lst: iterable of URLs; the segment after the last '/' is
                     used as the local file name
    :param output: destination directory, created if missing
    Failures are printed and skipped (best-effort, matches original behavior).
    """
    if not os.path.exists(output):
        os.makedirs(output)

    for url in file_lst:
        name = url[url.rfind('/') + 1:]
        output_path = os.path.join(output, name)
        try:
            # Percent-encode only the final path segment. The original
            # url.replace(name, quote(name)) substituted EVERY occurrence
            # of the filename substring, corrupting URLs whose earlier
            # segments happen to contain it.
            url = url[:url.rfind('/') + 1] + urllib.parse.quote(name)
            print(url)
            urllib.request.urlretrieve(url, output_path)
            print(f"文件已下载至：{output_path}")
        except Exception as e:
            # Best-effort: report and continue with the next URL.
            print(f"下载失败：{e}")


def unzip(zip_file, output):
    """Extract every member of *zip_file* into *output* (created if missing)."""
    os.makedirs(output, exist_ok=True)

    with zipfile.ZipFile(zip_file, 'r') as archive:
        archive.extractall(output)
        print(f"所有文件已解压到 {output}")
