# -*- coding: utf-8 -*-
import sys
import pandas as pd
import time
from sqlalchemy import create_engine
import pymysql
import os
import json
import tqdm
import requests
from urllib.parse import urlparse
from sqlalchemy import create_engine
import concurrent
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor, as_completed
import warnings
import traceback
import os

################################## MAIN CONFIGURATION — edit this section #############################################
# Downloads are stored in a sub-directory next to this script.
python_file_path = os.path.dirname(os.path.abspath(__file__))
save_dir = os.path.join(python_file_path, 'raw_012_am240327')  # local download target directory
print(f"下载保存的目录：{save_dir}")
# Row-identifier column: used in download log messages and to count rows per slice.
id_column = 'app_order_id'

## NOTE: the SQL below MUST expose a `file_name` column — it is used to split
## the result set into chunks, one parquet file per chunk.
sql = f"""
-- 提取订单对应的applist和sms 信息;
with extension_loans as (
     select app_order_id,count(1) order_c from am_system.t_installment a  group by 1 having count(1)>=2
),no_extension_loans as (
    select
        a.*
    from am_system.t_installment a
    left join extension_loans b on a.app_order_id = b.app_order_id
    where b.app_order_id is null -- 剔除所有的展期的用户及其所有的订单
),map_info as (
    select
	    concat('raw_am_',a.new_old_user_status,'_',substr(a.create_time,1,10),'.parquet') file_name,
	    b.acq_channel,b.product_code,b.product_set_code,b.product_name,b.device_type,b.user_id,
        a.app_order_id,a.new_old_user_status,d.tx_id,a.create_time loan_time,a.repayment_date,b.apply_time,b.id_card_number,b.phone_number
        ,case when a.overdue_days >=1 and DATEDIFF(DATE(DATE_SUB(NOW(), interval 14 hour)),a.repayment_date)>=1 then 1 end  as def_pd1
        ,case when  DATEDIFF(DATE(DATE_SUB(NOW(), interval 14 hour)),a.repayment_date)>=1 then 1 end  as agr_pd1
        ,CASE WHEN a.STATUS=1 and DATEDIFF(DATE(DATE_SUB(NOW(), interval 14 hour)),a.repayment_date)>=0  THEN 1  END  AS def_cpd
        ,case when  DATEDIFF(DATE(DATE_SUB(NOW(), interval 14 hour)),a.repayment_date)>=0 then 1 end   as agr_cpd
        ,c.create_time sms_upload_time,
        c.app_list_url applist,c.sms_records_url sms,
        c.device_info
    from no_extension_loans a
    inner join am_system.t_app_order b on a.app_order_id = b.app_order_id
    left join am_system.t_app_track c on b.app_track_id = c.id
    left join am_system.t_risk_req_record d on a.app_order_id=  d.biz_id
     where b.new_old_user_status in (0,1,2)  and b.device_type = 'ios' -- and a.acq_channel = 'AIMX'
)select * from map_info  order by sms_upload_time desc;
"""
################################### END MAIN CONFIGURATION #############################################


def mkdir_if_not_exists(dir_path):
    if os.path.exists(dir_path):
        return
    os.mkdir(dir_path)
mkdir_if_not_exists(save_dir)  # 本地下载的数据


def set_pd_show(max_rows=500, max_columns=200, max_colwidth=70):
    pd.set_option("display.max_rows", max_rows)
    pd.set_option("display.max_columns", max_columns)
    pd.set_option("display.max_colwidth", max_colwidth)


def set_warnings(need=True):
    """
    是否控制台报警
    :param need: 默认为True 表示开启，False则关闭
    :return:
    """
    if need:
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')


def mysql_engine(host, port, user, passwd, db=None):
    """Build a SQLAlchemy engine for a MySQL database (pymysql driver).

    :param host: MySQL server hostname or IP.
    :param port: MySQL server port.
    :param user: login user name.
    :param passwd: login password. NOTE(review): interpolated into the URL
        without escaping — assumes it never contains characters like '@' or
        '/'; use sqlalchemy.engine.URL.create if that cannot be guaranteed.
    :param db: optional default schema name.
    :return: a sqlalchemy Engine.
    :raises Exception: re-raises whatever create_engine raises. The original
        swallowed the error and implicitly returned None, which made callers
        fail later inside pd.read_sql with a confusing message.
    """
    try:
        return create_engine(f'mysql+pymysql://{user}:{passwd}@{host}:{port}/{db}')
    except Exception as e:
        print(f"An error occurred: {e}")
        raise


# 根据df中的url来下载数据
from tqdm import tqdm
import requests
from urllib.parse import urlparse


def is_valid_url(url):
    """Return True only for a string URL that has both a scheme and a netloc.

    Non-string inputs (None, NaN floats from pandas columns, numbers) now
    return False instead of raising: the original passed them straight to
    urlparse(), which raises AttributeError/TypeError for non-strings (not
    the caught ValueError), and this function is mapped over DataFrame
    columns that may contain NaN.

    :param url: candidate URL of any type.
    :return: bool — True iff url parses with a scheme and network location.
    """
    if not isinstance(url, str):
        return False
    try:
        result = urlparse(url)
        return all([result.scheme, result.netloc])
    except ValueError:
        return False


def download_data(df, url_column, id_column):
    """Download the content behind each URL in df[url_column] into a new
    column ``<url_column>_data`` (decoded as text), modifying df in place.

    Fixes over the original:
      * requests.get now has a timeout — the original could hang forever
        on an unresponsive host and stall the whole thread pool.
      * missing URLs (None / pandas NaN / empty string) are skipped silently;
        the original reported NaN values as format errors.
      * ``url == None`` replaced with proper identity/NaN checks; typo in
        the "format is erro" log message fixed.

    :param df: DataFrame holding one URL per row.
    :param url_column: name of the column containing the URLs.
    :param id_column: name of an id column, used only in log messages.
    :return: the same DataFrame with the extra ``<url_column>_data`` column.
    :raises ValueError: when either column is missing from df.
    """
    if url_column not in df.columns or id_column not in df.columns:
        raise ValueError(f"请检查数据中是否包含这两列:{url_column},{id_column}")
    data_column = url_column + '_data'
    df[data_column] = None
    with requests.Session() as session:
        for i, row in df.iterrows():
            url = row[url_column]
            row_id = row[id_column]
            # Silently skip absent URLs; the row keeps None in the data column.
            if url is None or (isinstance(url, float) and pd.isna(url)) \
                    or (isinstance(url, str) and not url.strip()):
                continue
            if not (isinstance(url, str) and is_valid_url(url)):
                print(f"url format is error ,{id_column}:{row_id},url:{url}")
                continue
            # Timeout added: (connect, read) seconds; original had none.
            response = session.get(url, stream=True, timeout=(10, 60))
            if response.status_code == 200:
                block_size = 1024  # 1 Kibibyte
                data = b''.join(chunk for chunk in response.iter_content(block_size))
                df.loc[i, data_column] = data.decode()
            else:
                print(
                    f"Failed to download data for {id_column}:{row_id} from {url}: {response.status_code}")
    return df


def task(df, file_name, pbar):
    """Thread-pool worker: download the applist payloads for one file slice
    and write the slice out as a zstd-compressed parquet file in save_dir.

    :param df: slice of the full DataFrame (all rows sharing one file_name).
    :param file_name: target parquet file name inside save_dir.
    :param pbar: shared tqdm progress bar, advanced once per finished slice.
    :return: the resulting DataFrame shape, or None when the slice failed
        (the exception is printed, not propagated, so other slices continue).
    """
    try:
        start_time0 = time.time()
        print(
            f"start to down the data {file_name} ,shape is {df.shape}")
        df = download_data(df, 'applist', id_column)
        save_path = os.path.join(save_dir, file_name)
        df.to_parquet(save_path, compression='zstd')
        end_time0 = time.time()
        print(f"{save_path} has saved,new shap {df.shape},耗时:{round(end_time0 - start_time0, 3)} s")  #
        time.sleep(0.5)
        return df.shape
    except Exception as e:
        print(f"多线程任务出现了异常，信息如下{e}")
        traceback.print_exc()
    finally:
        # Fix: advance the bar for failed slices too — the original only
        # updated on success, so any error left the bar short of 100%.
        pbar.update(1)


if __name__ == '__main__':
    start_time = time.time()
    set_warnings(False)
    set_pd_show()

    # SECURITY NOTE(review): database credentials are hard-coded in source.
    # Move them to environment variables or a config file before sharing.
    mysql_conn = mysql_engine('47.253.56.86',4001,'longxiaolei','WWsCQ9TXG+BJBHlK','rule')
    print(f'基础sql:\n{sql}')
    down_df = pd.read_sql(sql, mysql_conn) # type: ignore
    print(f"待下载的数据量为{down_df.shape}")

    # First-pass sanity check on the URL columns before downloading.
    print("针对url的格式进行第一轮检查====>")
    down_df['applist_check'] = down_df['applist'].map(is_valid_url)
    down_df['sms_check'] = down_df['sms'].map(is_valid_url)

    check_rs = down_df.groupby('applist_check')['applist'].nunique()
    print("applist url url检查结果如下：")
    print(check_rs)
    check_rs = down_df.groupby('sms_check')['sms'].nunique()
    print("sms url url检查结果如下：")
    print(check_rs)

    print(f"开始计算数据切片")
    # One output parquet file per distinct file_name value in the SQL result.
    grouped_df = down_df.groupby(['file_name'])[id_column].count().reset_index()

    print(f"数据切片的计算结果如下:")
    print(grouped_df)

    # Fix: the ThreadPoolExecutor is now a context manager so its worker
    # threads are joined and released on exit (the original never called
    # shutdown()).
    with ThreadPoolExecutor(15) as pool, \
            tqdm(total=len(grouped_df), desc="Downloading") as pbar:
        all_task_result = []
        for i, row in grouped_df.iterrows():
            file_name = row['file_name']
            cond = (down_df['file_name'] == file_name)
            df = down_df[cond].copy()
            all_task_result.append(
                pool.submit(task, df, file_name, pbar))
        for task_result in as_completed(all_task_result):
            msg = task_result.result()
    end_time = time.time()

    print(f"本次计算结束，共计生成{len(grouped_df)}个文件，耗时 {round(end_time - start_time, 3)} s")
