import json

import asf_search as asf

import os
from datetime import datetime
import shutil
import zipfile
from PIL import Image

from Data.DataStorage import HDFS
from Util.Process import INFO
from Util.Return import get_config, get_path, get_address, get_data_id
from Util.SQL import update


class SentinelAPI:
    """Client for the Alaska Satellite Facility (ASF) Sentinel-1 API.

    Pipeline: search() finds scenes, download() fetches them, preprocess()
    normalises them into standard TIFF + quicklook files, upload() pushes
    them to HDFS and marks them available in the database.
    See https://docs.asf.alaska.edu/asf_search/basics/
    """

    def __init__(self):
        # Read each config section once instead of one get_config() call per
        # field (the original re-parsed the "Sentinel" section nine times).
        sentinel_cfg = get_config("data", "Sentinel")
        credentials = get_config("config", "sentinel_api_key")
        self.name = sentinel_cfg['name']
        self.username = credentials['username']
        self.password = credentials['password']
        here = os.path.abspath(__file__)
        self.download_path = get_path(here, 4, sentinel_cfg['download_path'])
        self.available_path = get_path(here, 4, sentinel_cfg['available_path'])
        self.quicklook_path = get_path(here, 4, sentinel_cfg['quicklook_path'])
        self.platform = sentinel_cfg['platform']
        self.processingLevel = sentinel_cfg['processingLevel']
        self.max_results = sentinel_cfg['max_results']
        self.processes = sentinel_cfg['processes']
        self.datatype = sentinel_cfg['datatype']
        self.databand = sentinel_cfg['databand']
        self.session = asf.ASFSession().auth_with_creds(self.username, self.password)

    def search(self, search_params):
        """Query the ASF API and return the scenes matching *search_params*."""
        scenes = asf.geo_search(**search_params)
        # Fixed log message: original read "Found {} scenes found".
        INFO("INFO", 'Sentinel API: Found {} scenes'.format(len(scenes)))
        return scenes

    def download(self, scenes):
        """Download the given scene ids.

        GRD-HD scenes are fetched by partial remote-zip extraction (only the
        configured band tiff plus the quicklook, not the whole multi-GB
        archive); every other product is downloaded as a full archive.
        """
        grdh_products = []
        other_products = []
        for scene_id in scenes:  # split the two download strategies
            INFO("INFO", f'Sentinel API: Start Download {scene_id}')
            level = scene_id.split('_')[2]
            if level == 'GRDH':
                grdh_products.append(scene_id + "-GRD_HD")
            else:
                other_products.append(f"{scene_id}-{level}")

        if grdh_products:
            products = asf.product_search(grdh_products)
            for product in products:
                with product.remotezip(session=self.session) as z:
                    # Only the requested band tiff and the quicklook image.
                    wanted = [entry.filename for entry in z.filelist
                              if (self.databand in entry.filename and entry.filename.endswith('.tiff'))
                              or ('quick-look' in entry.filename)]
                    for member in wanted:
                        z.extract(member, path=self.download_path)
        if other_products:
            products = asf.product_search(other_products)
            products.download(path=self.download_path, session=self.session, processes=self.processes)

    def preprocess(self, scenes):
        """Normalise downloaded scenes into standard TIFF and quicklook files.

        Returns a list of booleans, one per scene, True when the scene's
        local files were found and processed.  (Bug fix: the original built
        this list but never returned it, so callers always got None.)
        """
        success = []
        for scene_id in scenes:
            INFO("INFO", f'Sentinel API: Start Preprocess {scene_id}')
            level = scene_id.split('_')[2]
            if level == 'GRDH':
                success.append(self._preprocess_grdh(scene_id))
            else:
                success.append(self._preprocess_other(scene_id))
        return success

    def _preprocess_grdh(self, scene_id):
        """Move an extracted GRD-HD scene's tiff and quicklook into place.

        Returns False when the scene's .SAFE directory is missing.
        """
        safe_dir = os.path.join(self.download_path, scene_id + '.SAFE')
        if not os.path.exists(safe_dir):
            return False
        # Rename the measurement tiff to <scene_id>.TIFF and move it.
        # NOTE(review): assumes "measurement" contains exactly the one band
        # file extracted by download() — confirm no other entry can remain.
        tiff_dir = os.path.join(safe_dir, "measurement")
        tiff_file = os.path.join(tiff_dir, scene_id + '.TIFF')
        os.rename(os.path.join(tiff_dir, os.listdir(tiff_dir)[0]), tiff_file)
        shutil.move(tiff_file, self.available_path)
        # Rename and move the quicklook png.
        quick_file = os.path.join(os.path.join(safe_dir, "preview"), scene_id + '.png')
        os.rename(os.path.join(safe_dir, "preview/quick-look.png"), quick_file)
        shutil.move(quick_file, self.quicklook_path)
        shutil.rmtree(safe_dir)  # drop the rest of the .SAFE tree
        return True

    def _preprocess_other(self, scene_id):
        """Extract the quicklook from a downloaded zip, rotate it 180 degrees
        and store it under the quicklook path.

        Returns False when the scene's zip archive is missing.
        """
        zip_path = os.path.join(self.download_path, scene_id + '.zip')
        if not os.path.exists(zip_path):
            return False
        # Renamed local from "zip" — it shadowed the builtin.
        with zipfile.ZipFile(zip_path, 'r') as archive:
            for member in archive.namelist():
                if 'quick-look.png' in member:
                    # Extraction preserves the internal <scene_id>.SAFE/... path.
                    archive.extract(member, self.download_path)
        safe_dir = os.path.join(self.download_path, scene_id + '.SAFE')
        quick_file = os.path.join(self.quicklook_path, scene_id + '.png')
        image = Image.open(os.path.join(safe_dir, "preview/quick-look.png"))
        # Rotate 180 degrees (original behaviour; presumably to match the
        # orientation of GRD-HD quicklooks — confirm).  Image.ROTATE_180 is
        # deprecated since Pillow 9.1; switch to Image.Transpose.ROTATE_180
        # once the minimum Pillow version allows.
        image = image.transpose(Image.ROTATE_180)
        image.save(quick_file)
        shutil.rmtree(safe_dir)  # remove the extracted .SAFE tree
        return True

    def upload(self, scenes):
        """Push each scene's data file and quicklook to HDFS, delete the local
        copies, and mark the scene available in the database when both uploads
        can be verified on HDFS."""
        for scene_id in scenes:
            level = scene_id.split('_')[2]
            if level == 'GRDH':
                local_data_path = os.path.join(self.available_path, scene_id + ".TIFF")
                hdfs_path = "/" + get_config("data", "Sentinel")['available_path']
            else:
                local_data_path = os.path.join(self.download_path, scene_id + ".zip")
                hdfs_path = "/" + get_config("data", "Sentinel")['download_path']
            verified = True
            hdfs = HDFS()  # one HDFS connection per scene
            hdfs.push(hdfs_path, local_data_path)  # upload the data file
            os.remove(local_data_path)  # local copy no longer needed
            if not hdfs.exist(hdfs_path + "/" + os.path.basename(local_data_path)):
                verified = False
            quicklook_hdfs = "/" + get_config("data", "Sentinel")['quicklook_path']
            hdfs.push(quicklook_hdfs, os.path.join(self.quicklook_path, scene_id + ".png"))
            if not hdfs.exist(quicklook_hdfs + "/" + scene_id + ".png"):
                verified = False
            if verified:
                # SECURITY: scene_id is interpolated straight into the SQL
                # condition.  Fine for API-issued scene ids, but update()
                # should take parameterized conditions if scene_id can ever
                # originate from user input.
                update({"available": 1}, f'scene_id = "{scene_id}"', 'data')  # mark available
                INFO("INFO", f"Sentinel API: {scene_id} data is available!")
            else:
                INFO("INFO", f"Sentinel API: {scene_id} data is not available!")

    def request(self, date, bbox=None, point=None):
        """Search every configured platform/processing-level pair and return
        the resolved database-format records.

        date  -- (start, end) pair of date strings
        bbox  -- (xmin, ymin, xmax, ymax) tuple; used when point is None
        point -- {'lng': ..., 'lat': ...} dict; takes precedence over bbox
        """
        result_list = []
        for platform, level in zip(self.platform, self.processingLevel):
            if point is None:
                intersects = ('POLYGON(({xmin} {ymin},{xmax} {ymin},{xmax} {ymax},'
                              '{xmin} {ymax},{xmin} {ymin}))').format(
                    xmin=bbox[0], ymin=bbox[1], xmax=bbox[2], ymax=bbox[3])
            else:
                intersects = 'POINT({lng} {lat})'.format(lng=point['lng'], lat=point['lat'])
            search_params = {
                'intersectsWith': intersects,
                'platform': platform,
                'processingLevel': level,
                'start': date[0],
                'end': date[1],
                'maxResults': self.max_results
            }
            if level == "":
                # An empty level means "no processingLevel filter".
                search_params.pop('processingLevel', None)
            result_list = result_list + self.resolve(self.search(search_params))
        return result_list

    def resolve(self, json_list):
        """Convert raw API result json objects into database-record dicts."""
        result = []
        for raw in json_list:
            json_data = json.loads(str(raw))
            props = json_data['properties']
            # Normalise the three API timestamps to YYYY-MM-DD.
            for key in ('processingDate', 'startTime', 'stopTime'):
                props[key] = datetime.strptime(props[key],
                                               "%Y-%m-%dT%H:%M:%SZ").strftime('%Y-%m-%d')
            file_level = props['fileID'].split('_')[2]
            level = 'GRD_HD' if file_level == 'GRDH' else file_level
            # First ring of the footprint polygon; corner order 0,3,1,2 below
            # is carried over from the original indexing — confirm against the
            # ASF geometry convention.
            ring = json_data['geometry']['coordinates'][0]
            result.append({
                'dataset': self.name,
                'scene_id': props['fileID'].replace("-{level}".format(level=level), ""),
                'data_id': get_data_id(),
                'type': self.datatype,
                'product': level,
                'satellite': 'Sentinel 1',
                'date': props['processingDate'],
                'address': get_address([
                    # centre point, then the four corners
                    ((ring[0][0] + ring[2][0]) / 2, (ring[0][1] + ring[2][1]) / 2),
                    (ring[0][0], ring[0][1]),
                    (ring[3][0], ring[3][1]),
                    (ring[1][0], ring[1][1]),
                    (ring[2][0], ring[2][1])]),
                'bbox': (ring[0][0], ring[1][1], ring[2][0], ring[3][1]),
                'cbox': ((ring[0][0], ring[0][1]),
                         (ring[3][0], ring[3][1]),
                         (ring[1][0], ring[1][1]),
                         (ring[2][0], ring[2][1])),
                'available': 0,
                'json': json_data
            })
        return result
