#!/data/ygsfb/virtualEnvironment/sentinel2/bin/python
from datetime import datetime, timedelta
import asyncio
import atexit
import fcntl
import functools
import json
import logging
import os
import queue
import re
import sched
import socket
import threading
import time

from dateutil.relativedelta import relativedelta
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from psycopg2.extras import execute_values
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from tqdm import tqdm
import daemonize
import paramiko
import psycopg2
import requests


def write_to_file(file_path,zip_path,mode):
    """Coroutine that streams chunks into *file_path* under an exclusive lock.

    Prime with next(), then .send(bytes) to append each chunk; .send(None)
    releases the lock and renames the temp file to *zip_path* (marking the
    download complete).

    Raises:
        Exception: if another process already holds the lock on the file.

    NOTE(review): the file is opened (and, in 'wb' mode, truncated) before
    the lock attempt — confirm concurrent writers cannot clobber each other.
    """
    with open(file_path, mode=mode) as file_handle:
        try:
            # Non-blocking exclusive lock: fail fast instead of waiting.
            fcntl.flock(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except BlockingIOError as exc:
            # Narrowed from the original loop-wide try so write errors are
            # not misreported as lock contention; chain the original cause.
            raise Exception("文件已被其他进程占用，无法写入") from exc
        while True:
            content = yield
            if content is None:
                fcntl.flock(file_handle, fcntl.LOCK_UN)
                os.rename(file_path,zip_path)  # atomic completion marker
                return
            file_handle.write(content)
            file_handle.flush()  # persist each chunk immediately
 
 
def split_periods(period):
    """Split an inclusive date range at the archive boundary (now - 5 days).

    Args:
        period: (start, end) date strings in "%Y-%m-%d" format.

    Returns:
        (history_period, current_period): each a (start, end) string tuple,
        or None when that side of the split is empty. The history side always
        extends to the split date (matches the original behaviour).
    """
    start_str, end_str = period
    start_date = datetime.strptime(start_str, "%Y-%m-%d")
    end_date = datetime.strptime(end_str, "%Y-%m-%d")
    split_date = datetime.now() - timedelta(days=5)
    split_str = split_date.strftime("%Y-%m-%d")
    history_period = None
    current_period = None
    if start_date <= split_date <= end_date:
        history_period = (start_date.strftime("%Y-%m-%d"), split_str)
        current_period = (split_str, end_date.strftime("%Y-%m-%d"))
    elif end_date < split_date:
        history_period = (start_date.strftime("%Y-%m-%d"), split_str)
    else:  # split_date < start_date: the whole range is in the near future
        # BUGFIX: the current window must begin at start_date; the original
        # used split_date, silently widening the range by 5+ days.
        current_period = (
            start_date.strftime("%Y-%m-%d"), end_date.strftime("%Y-%m-%d"))
    return history_period, current_period
    

def qhdm_2_tiles(cargs,qhdm):
    """Return distinct Sentinel-2 tile names intersecting admin region *qhdm*.

    Args:
        cargs: psycopg2 connection keyword arguments.
        qhdm: administrative-division code (matched against dt_sy.qhdm).

    Returns:
        list[str]: tile identifiers from china_tile.

    Raises:
        psycopg2.Error: on query failure (the original swallowed the error
        and then crashed with an unbound ``data`` NameError).
    """
    connection = psycopg2.connect(**cargs)
    connection.autocommit = True
    # Parameterized query: qhdm comes from outside — never interpolate it.
    query = """
            SELECT DISTINCT china_tile.tile
            FROM china_tile
            JOIN dt_sy
              ON st_intersects(china_tile.geom, dt_sy.shape)
            WHERE dt_sy.qhdm = %s
            """
    try:
        with connection.cursor() as cursor:
            cursor.execute(query, (qhdm,))
            data = cursor.fetchall()
    finally:
        connection.close()  # always release the connection
    return [row[0] for row in data]


def updata_table(cargs,data):
    """Upsert product rows into sentinel2_l1c, keyed on the ``name`` column.

    Args:
        cargs: psycopg2 connection keyword arguments.
        data: iterable of 8-tuples matching the column list below.
    """
    connection = psycopg2.connect(**cargs)
    connection.autocommit = True  # each statement commits immediately
    query = """
        INSERT INTO sentinel2_l1c
        (name,id,tile,cloud,online,product_date,geom,create_date) 
        VALUES %s
        ON CONFLICT (name)
        DO UPDATE SET 
        online=excluded.online,
        id=excluded.id,
        tile=excluded.tile,
        cloud=excluded.cloud,
        product_date=excluded.product_date,
        geom=excluded.geom,
        create_date=excluded.create_date;
        """
    try:
        with connection.cursor() as cursor:
            execute_values(cursor, query, data)
    finally:
        connection.close()  # always release, even on failure (original leaked)
    return


def analysis_product(tile,product):
    """Flatten one OData product dict into a DB record and download params.

    Args:
        tile: tile identifier the product was searched under.
        product: product dict from the Copernicus OData catalogue.

    Returns:
        (record, params): *record* is the 8-tuple inserted by updata_table;
        *params* is [name, product_date, product_id] when the product is
        online and downloadable, else an empty list.
    """
    # Parse the acquisition timestamp once and reuse it (the original
    # parsed the same string twice).
    product_date = datetime.strptime(
        product['ContentDate']['Start'],"%Y-%m-%dT%H:%M:%S.%fZ")
    # Footprint looks like geography'...'; take the quoted WKT payload.
    wkt = re.search(r"'(.*?)'",product['Footprint']).group(1)
    cloud = next(
        att["Value"] for att in product['Attributes']
        if att['Name'] == "cloudCover")
    name = product['Name'].replace(".SAFE","")
    record = (
        name,
        product['Id'],
        tile,
        cloud,
        product['Online'],
        product_date,
        wkt,
        datetime.now())
    if product['Online']:  # only online products can be downloaded
        params = [name, product_date, product['Id']]
    else:
        params = []
    return record,params


def run_in_thread(name,daemon=True):
    """Decorator factory: run the wrapped callable in a named thread.

    Args:
        name: thread name (shows up in logs/debuggers).
        daemon: True -> fire-and-forget, wrapper returns None;
                False -> wrapper returns the started Thread so callers
                can join() it.
    """
    def decorator(func):
        @functools.wraps(func)  # BUGFIX: preserve the wrapped function's metadata
        def wrapper(*args, **kwargs):
            thread = threading.Thread(
                target=func,
                name=name,
                daemon=daemon,
                args=args,
                kwargs=kwargs)
            thread.start()
            return None if daemon else thread
        return wrapper
    return decorator


class Initialization:
    """One-shot setup: ensure month folders exist and the target table is present."""

    def __init__(self,period,save_dir,cargs,table_name):
        self.create_folder(save_dir,period)
        if not self.checker(cargs,table_name):
            self.create_table(cargs,table_name)

    @staticmethod
    def checker(cargs,table_name):
        """Return True if *table_name* exists in the connected database.

        Uses a parameterized query (the original interpolated the name) and
        always closes the connection; on failure the psycopg2 error
        propagates instead of crashing later on an unbound local.
        """
        query = """
            SELECT EXISTS (
                SELECT 
                FROM information_schema.tables 
                WHERE table_name = %s);"""
        connection = psycopg2.connect(**cargs)
        connection.autocommit = True
        try:
            with connection.cursor() as cursor:
                cursor.execute(query, (table_name,))
                exists = cursor.fetchone()[0]
        finally:
            connection.close()
        return exists

    @staticmethod
    def create_folder(save_dir,period):
        """Create one <save_dir>/YYYYMM folder for every month touched by *period*.

        Args:
            save_dir: root directory for downloads.
            period: (start, end) date strings, "%Y-%m-%d".
        """
        start_date, end_date = period
        # Walk month-by-month from the FIRST of the start month. BUGFIX:
        # the old relativedelta loop kept the start day-of-month, so the
        # final month was skipped whenever start-day > end-day
        # (e.g. 2024-01-31 .. 2024-03-01 never created 202403).
        current = datetime.strptime(start_date, '%Y-%m-%d').replace(day=1)
        end = datetime.strptime(end_date, '%Y-%m-%d')
        while current <= end:
            os.makedirs(
                os.path.join(save_dir, current.strftime('%Y%m')),
                exist_ok=True)
            # Advance to the first day of the next month (stdlib only).
            if current.month == 12:
                current = current.replace(year=current.year + 1, month=1)
            else:
                current = current.replace(month=current.month + 1)
        return

    @staticmethod
    def create_table(cargs,table_name):
        """Create the product table (with column comments) if it is missing.

        NOTE: *table_name* is interpolated into DDL; it comes from the
        trusted config file, not from user input.
        """
        # BUGFIX: the module never defined a global ``logger``; use the
        # logging module directly.
        log = logging.getLogger(__name__)
        log.debug(f"Table '{table_name}' does not exist in the database.")
        query = f'''
            CREATE TABLE {table_name} (
                name VARCHAR(255) PRIMARY KEY,
                id VARCHAR(255),
                tile VARCHAR(255),
                cloud NUMERIC(10, 2),
                online bool,
                product_date timestamp,
                geom public.geometry(polygon, 4326),
                zip_path text,
                tif_path text,
                create_date timestamp);
                COMMENT ON COLUMN {table_name}.name IS '产品名称';
                COMMENT ON COLUMN {table_name}.id IS '下载ID';
                COMMENT ON COLUMN {table_name}.tile IS '图幅号';
                COMMENT ON COLUMN {table_name}.cloud IS '含云量';
                COMMENT ON COLUMN {table_name}.online IS '产品是否在线';
                COMMENT ON COLUMN {table_name}.geom IS '有效边界范围';
                COMMENT ON COLUMN {table_name}.product_date IS '产品日期';
                COMMENT ON COLUMN {table_name}.zip_path IS '原始zip存储路径';
                COMMENT ON COLUMN {table_name}.tif_path IS '合成10波段栅格存储路径';
                COMMENT ON COLUMN {table_name}.create_date IS '该条记录更新时间';'''
        connection = psycopg2.connect(**cargs)
        connection.autocommit = True
        try:
            with connection.cursor() as cursor:
                cursor.execute(query)
        finally:
            connection.close()
        log.debug(f"Table '{table_name}' exists in the database.")
        return
 
 
class Searcher:
    """Queries the Copernicus OData catalogue for Sentinel-2 L1C products.

    Original author's notes (translated): in the priority queue, smaller
    numbers mean higher priority; a seen-set guarantees already-loaded
    elements are not enqueued twice; lists are unhashable in Python, so
    convert them to tuples before putting them into a set.
    """

    def __init__(self,proxies):
        self.session = requests.Session()
        self.session.proxies.update(proxies)

    def __enter__(self):
        return self  # context-manager support

    def __exit__(self, exc_type, exc_value, traceback):
        # BUGFIX: always close the session; the original only closed it on
        # the clean-exit path and leaked it when an exception occurred.
        if exc_type is not None:
            print(f"Exception occurred: {exc_type}, {exc_value}")
        self.close()

    def __searcher_link(self,url,params=None):
        """Fetch one catalogue page, following @odata.nextLink pagination.

        Retries forever with a 10-minute back-off on HTTP errors or
        transport failures. Returns the accumulated product dicts.
        """
        products = []
        try:
            with self.session.get(url,params=params) as response:
                result = response.json()
                if response.status_code == 200:
                    # BUGFIX: append the current page before recursing so the
                    # server's ordering (ContentDate desc) is preserved; the
                    # original appended later pages first.
                    products.extend(result["value"])
                    if "@odata.nextLink" in result:
                        products.extend(
                            self.__searcher_link(result["@odata.nextLink"]))
                else:
                    print(response.status_code,response.json())
                    time.sleep(60*10)  # back off, then retry the same page
                    products = self.__searcher_link(url,params=params)
        except requests.RequestException as e:
            print(e)
            time.sleep(60*10)
            products = self.__searcher_link(url,params=params)
        return products

    @run_in_thread(name="搜索者线程",daemon=False)
    def searcher_tile(self,sparams_queue,product_queue):
        """Worker loop: consume (tile, period, cloud) jobs, publish results.

        A falsy job is the shutdown sentinel; it is forwarded to
        product_queue so the downstream consumer stops too.
        """
        while True:
            element = sparams_queue.get()
            if element:  # real job: run the query
                tile,period,cloud = element
                start, end = period
                attributes = [
                    f"Attributes/OData.CSC.StringAttribute/any(att:att/Name eq 'tileId' and att/OData.CSC.StringAttribute/Value eq '{tile}')",
                    f"Attributes/OData.CSC.StringAttribute/any(att:att/Name eq 'processingLevel' and att/OData.CSC.StringAttribute/Value eq 'S2MSI1C')",
                    f"Attributes/OData.CSC.DoubleAttribute/any(att:att/Name eq 'cloudCover' and att/OData.CSC.DoubleAttribute/Value lt {cloud})",
                    f"ContentDate/Start gt {start}T00:00:00.000Z",
                    f"ContentDate/Start lt {end}T00:00:00.000Z"]
                payload = {
                    '$filter':" and ".join(attributes),
                    '$orderby': "ContentDate/Start desc",
                    '$count':'True',
                    '$expand': 'Attributes'}  # 'Assets' is also expandable
                products = self.__searcher_link(
                    "https://catalogue.dataspace.copernicus.eu/odata/v1/Products",payload)
                product_queue.put((tile,products))
            else:
                product_queue.put(element)  # forward shutdown sentinel
                break
        return 

    def close(self):
        """Release the HTTP session."""
        self.session.close()
        return


class Downloader:
    """Authenticated downloader for Copernicus Data Space product zips.

    Keeps an OAuth access token fresh in a background daemon thread and
    streams product archives to disk through the write_to_file coroutine.
    """
    interval = 590  # seconds between token refreshes (token TTL is ~600s)
    timeout = 60    # per-request timeout in seconds

    def __init__(self,account,proxies,attmpt):
        # Retry transient transport failures at the adapter level.
        retries = Retry(
            total=attmpt,
            connect=5,
            read=5,
            backoff_factor=0,# 0.5
            raise_on_status=True,
            status_forcelist=[104,500,502,503,504])
        adapter = HTTPAdapter(
            max_retries=retries,
            pool_connections=1,
            pool_maxsize=1)
        # Password-grant credentials used to (re)acquire tokens.
        self.data = {
            'grant_type': 'password',
            'username': account['username'],
            'password': account['password'],
            'client_id': 'cdse-public'}
        self.session = requests.Session() 
        self.session.proxies.update(proxies)
        self.session.mount('http://', adapter)
        self.session.mount('https://', adapter)
        self.session.keep_alive = True
        # Start the background token-refresh loop (daemon thread).
        self.refresh_token(self.data,self.interval)

    def __enter__(self):
        return self  # context-manager support

    def __exit__(self, exc_type, exc_value, traceback):
        # BUGFIX: always close the session; the original only closed it on
        # the clean-exit path and leaked it when an exception occurred.
        if exc_type is not None:
            print(f"Exception occurred: {exc_type}, {exc_value}")
        self.close()

    @run_in_thread(name="刷新token线程")
    def refresh_token(self,data,interval):
        """Obtain/refresh the access token, then schedule the next refresh.

        Runs in a daemon thread; each cycle sleeps *interval* seconds and
        re-invokes itself (a fresh thread via the decorator), switching
        between password-grant and refresh-token-grant payloads as needed.
        """
        try:  # try to obtain an access token and a refresh token
            with self.session.post(
                url="https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token",
                headers={'Content-Type': 'application/x-www-form-urlencoded'},
                data=data,
                timeout=60) as response:
                if response.status_code == 200: # accepted: store token, prepare refresh grant
                    self.__access_token = response.json()['access_token']
                    interval = response.json()['expires_in']  
                    data = {
                            'grant_type': 'refresh_token',
                            'refresh_token': response.json()['refresh_token'],
                            'client_id': 'cdse-public'}
                else: # rejected: retry soon with the account credentials
                    print(response.status_code,response.json())
                    interval = 10
                    data = self.data
        except requests.RequestException as e: # transport error: retry soon with credentials
            print(e)
            interval = 10
            data = self.data
        finally:  # schedule the next cycle with the updated payload
            time.sleep(interval)
            self.refresh_token(data,interval)
        return

    def __download_file(self,productid,zip_path):
        """Download one product zip to *zip_path*; return True on success.

        Writes to a .incomplete temp file via the locked write_to_file
        coroutine; if the temp file is locked by another worker, waits for
        that worker's finished zip instead.
        """
        if not os.path.exists(zip_path):  # target not downloaded yet
            temp_path = zip_path.replace('.zip','.incomplete')
            try:
                coroutine = write_to_file(temp_path,zip_path,'wb') # locked writer coroutine
                next(coroutine)  # prime it; raises if the temp file is locked
                try: # perform the download
                    with self.session.get(
                        url=f"https://zipper.dataspace.copernicus.eu/odata/v1/Products({productid})/$value",
                        headers={"Authorization": f"Bearer {self.__access_token}"},
                        timeout=self.timeout,
                        stream=True) as response:
                        if response.status_code == 200: # stream chunks to disk
                            for chunk in response.iter_content(chunk_size=1024):
                                if chunk: coroutine.send(chunk) # skip keep-alive chunks
                            coroutine.send(None)  # release lock, rename to .zip
                            status = True 
                        else: # rejected: wait a minute and report failure
                            print(f"{response.status_code} {response.json()}")
                            time.sleep(60)
                            status = False 
                except requests.RequestException as e: # transport error: back off, retry
                    print(e)
                    time.sleep(60*10)
                    status = self.__download_file(productid,zip_path)
                finally:
                    coroutine.close()
            except Exception as e:  # temp file locked elsewhere: wait for its zip
                print(e)
                while not os.path.exists(zip_path):
                    time.sleep(60)
                status = True 
        else: # file already downloaded
            status = True 
        return status

    @run_in_thread(name="下载线程",daemon=False)
    def download_task(self,download_queue,record_queue):
        """Worker loop: pull (priority, job) items and download until a falsy job.

        The falsy sentinel is re-queued so sibling workers shut down too.
        """
        # Block until the refresh thread has produced the first access token
        # (checked via its name-mangled attribute).
        private_attribute = f'_{self.__class__.__name__}__access_token'
        while not hasattr(self, private_attribute):time.sleep(1)
        while True:
            priority, element = download_queue.get()
            if element:
                name,zip_path,productid = element
                status = self.__download_file(productid,zip_path)
                if status: # file on disk: log success
                    line = f"{name} success {datetime.now()}"
                    record_queue.put((zip_path,line))
                else: # download failed: log failure, nothing to forward
                    line = f"{name} failure {datetime.now()}"
                    record_queue.put((None,line))
            else:
                download_queue.put((priority, element))  # re-queue sentinel
                break
        return
       
    def close(self):
        """Release the HTTP session."""
        self.session.close()
        return


async def async_function1(queue_test):
    """Demo consumer: drain *queue_test* until a None sentinel (2s per item)."""
    while (item := await queue_test.get()) is not None:
        await asyncio.sleep(2)
        print("function1 Received data in async function:", item)

async def async_function2(queue_test):
    """Demo consumer: drain *queue_test* until a None sentinel (3s per item)."""
    while (item := await queue_test.get()) is not None:
        await asyncio.sleep(3)
        print("function2 Received data in async function:", item)

async def main_t():
    """Demo driver: feed 100 integers to two consumers and wait for both.

    BUGFIX: the original enqueued a single None sentinel; only one consumer
    received it and the other awaited forever, so gather() never returned.
    One sentinel per consumer is required.
    """
    queue_test = asyncio.Queue()
    tasks = [
        asyncio.create_task(async_function1(queue_test)),
        asyncio.create_task(async_function2(queue_test)),
    ]
    print('启动异步函数')
    for i in range(100):
        await queue_test.put(i)
    for _ in tasks:  # one shutdown sentinel per consumer
        await queue_test.put(None)
    await asyncio.gather(*tasks)
    return
    
def run_async_function():
    """Run the main_t demo to completion on a fresh asyncio event loop."""
    return asyncio.run(main_t())


@run_in_thread(name="加载者线程",daemon=False)
def loader(tiles,historyp,currentp,cloud,interval,sparams_queue):
    if historyp: # 若历史时期存在，则执行立即查询、更新表、加载任务
        for tile in tqdm(tiles,disable=False,desc='history'):
            sparams_queue.put((tile,historyp,cloud))
    if currentp: # 如果近期存在，则执行定时查询、更新表、加载任务
        start_date, end_date = currentp
        start_date = datetime.strptime(start_date,"%Y-%m-%d")
        end_date = datetime.strptime(end_date,"%Y-%m-%d")
        while start_date > datetime.now(): time.sleep(60*60)# 若开始日期在今天之后，则等待1小时为周期
        while datetime.now() <= end_date + timedelta(days=5): # 今天小于执行结束日期，可执行
            if datetime.now()-timedelta(days=5) > start_date: # 若开始日期在前五天之前，开始日期重置为前五天
                sdate = datetime.now()-timedelta(days=5) 
            else:
                sdate = start_date
            if end_date > datetime.now(): # 若结束日期在今天之后，结束日期重置为明天
                edate = datetime.now()+timedelta(days=1)
            else:
                edate = end_date
            period = (sdate.strftime("%Y-%m-%d"),edate.strftime("%Y-%m-%d"))
            for tile in tqdm(tiles,disable=False,desc='current'):
                sparams_queue.put((tile,period,cloud))
            time.sleep(interval)  # 8小时为周期
    sparams_queue.put(False)
    return


@run_in_thread(name="生产者线程",daemon=False)
def search(tiles,historyp,currentp,cargs,proxies,dparams_queue):
    cloud = 100
    interval = 8*60*60
    
    # 搜索队列
    sparams_queue = queue.Queue(maxsize=10000) 
    # 产品队列
    product_queue = queue.Queue(maxsize=10000)

    # 定时装载任务
    loader_thread = loader(tiles,historyp,currentp,cloud,interval,sparams_queue)
    
    # 查询数据线程
    searcher =  Searcher(proxies) 
    searcher_thread = searcher.searcher_tile(sparams_queue,product_queue)
    
    # 处理产品信息
    id_infos = set()
    while True:
        element = product_queue.get()
        if element:
            tile, products = element
            records = []
            for product in products:
                if product['Id'] not in id_infos:
                    id_infos.add(product['Id'])
                    record,params = analysis_product(tile,product)
                    records.append(record)
                    dparams_queue.put(params)  # 添加下载队列
            updata_table(cargs,records)  # 更新表
        else:
            dparams_queue.put(element)
            break

    loader_thread.join()
    searcher_thread.join()
    searcher.close()
    return


@run_in_thread(name="过滤者线程",daemon=False)
def filter_tasks(task_id,save_dir,dparams_queue,download_queue,record_queue,zipfile_queue):
    exist_names = set()
    log_path = f"{task_id}.log"
    if os.path.exists(log_path): # 如果记录文件存在
        with open(log_path, 'r') as file:
            for line in file.readlines():
                if "success" in line:  # 获取success标记的name
                    name = line.split(" ")[0]
                    exist_names.add(name)
    while True:
        element = dparams_queue.get()
        if element:
            name,date,productid = element
            zip_path = os.path.join(save_dir,date.strftime("%Y%m"),f"{name}.zip")
            if name not in exist_names:  # 未在下载记录中
                if os.path.exists(zip_path): # 若文件存在
                    line = f"{name} success {datetime.now()}"
                    record_queue.put((zip_path,line))
                else:
                    priority = -int(date.strftime("%Y%m%d"))
                    download_queue.put((priority, (name,zip_path,productid)))
            else:
                zipfile_queue.put(zip_path)
        else:
            priority = 0
            download_queue.put((priority,element))
            break
    return


@run_in_thread(name="消费者线程",daemon=False)
def download(task_id,save_dir,account,proxies,dparams_queue,zipfile_queue):
    attmpt = 10
    download_queue = queue.PriorityQueue(maxsize=10000)
    record_queue = queue.Queue(maxsize=10000)
    
    # 过滤者线程
    filter_thread = filter_tasks(
        task_id,save_dir,dparams_queue,download_queue,record_queue,zipfile_queue)
    
    # 下载数据线程
    downloader = Downloader(account,proxies,attmpt) 
    downloader_threads = []
    for _ in range(4):
        thread = downloader.download_task(download_queue,record_queue)
        downloader_threads.append(thread)
    
    # 记录文件是否存在
    while not(record_queue.empty() and not all(
        [t.is_alive() for t in downloader_threads])):
        zip_path, line  = record_queue.get()
        with open(f"{task_id}.log", 'a+') as file:
            file.write(line+"\n")
            file.flush()
        if zip_path:
            zipfile_queue.put(zip_path)
    zipfile_queue.put(False) 
    
    filter_thread.join()
    for thread in downloader_threads: thread.join()
    downloader.close()
    return


@run_in_thread(name="传输者线程",daemon=False)
def sender(ssh_args,remote_dir,zipfile_queue):
    while True:
        element = zipfile_queue.get()
        if element:
            local_path = element
            if os.path.exists(local_path):
                file_name = "/".join(local_path.split('/')[-2:])
                remote_path = os.path.join(remote_dir,file_name)
                remote_date_path = os.path.dirname(remote_path)
                try:
                    ssh = paramiko.SSHClient()
                    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                    ssh.connect(*ssh_args)
                    sftp = ssh.open_sftp()
                    try:
                        sftp.listdir(remote_date_path)
                    except IOError as e:
                        sftp.mkdir(remote_date_path)
                    try:
                        sftp.stat(remote_path)
                    except FileNotFoundError as e:
                        temp_path = remote_path.replace('.zip', '.incomplete')
                        try:
                            sftp.stat(temp_path)
                        except FileNotFoundError as e:
                            sftp.put(local_path, temp_path, confirm=True)
                            sftp.rename(temp_path, remote_path)
                except Exception as e:
                    time.sleep(60*10)
                    zipfile_queue.put(element)
        else:
            break
    sftp.close()
    ssh.close()
    return


@run_in_thread(name="网络监控线程",daemon=True)
def check_internet_connection():
    while True:
        try:
            response = requests.get("http://www.baidu.com", timeout=5)
            if response.status_code == 200:
                print("网络通畅，可以连接互联网")
            else:
                print("网络连接异常，无法连接互联网")
        except requests.RequestException as e:
            print("网络连接异常，无法连接互联网")
        time.sleep(1)
    return

 
def main(task_id,qhdm,period,account_index):
    """Assemble and run the full pipeline for one administrative region:
    load config, prepare folders/table, then start producer (search),
    consumer (download) and sender threads and wait for all of them.
    """
    # time.sleep(20)  # optional stagger to avoid exceeding max connections
    with open('./config.json',"r") as file:
        params = json.load(file)
        proxy = params["proxy"]
        accounts = params["accounts"]
        cargs = params["cargs"]
        save_dir = params["save_dir"]
        table_name = params["table_name"]
        interval = params['interval']      # read to validate presence; unused here
        ssh_args = params['ssh_args']
        remote_dir = params['remote_dir']
    account = accounts[account_index]
    print("初始化")
    Initialization(period,save_dir,cargs,table_name)
    tiles = qhdm_2_tiles(cargs,qhdm)
    historyp, currentp = split_periods(period)
    endpoint = f"{proxy['ip']}:{proxy['port']}"
    proxies = {'http': endpoint, 'https': endpoint}
    # Inter-thread queues wiring producer -> consumer -> sender.
    dparams_queue = queue.Queue(maxsize=10000)
    zipfile_queue = queue.Queue(maxsize=10000)
    threads = [
        search(tiles,historyp,currentp,cargs,proxies,dparams_queue),              # producer
        download(task_id,save_dir,account,proxies,dparams_queue,zipfile_queue),   # consumer
        sender(ssh_args,remote_dir,zipfile_queue),                                # transfer
    ]
    for thread in threads:
        thread.join()
    return


if __name__ == "__main__":
    task_id = 'test1234'
    qhdm = '41'
    period = ['2024-02-20', '2024-03-20']
    account_index = 0
    # main(task_id,qhdm,period,account_index)
    run_async_function()
    # file_path = "tes t.txt"
    # contents = ["asdfa"] * 100
    # write_to_file(file_path, contents)

    
    



    
  


