import re,os,traceback,json,requests,time
import pandas as pd
from tqdm import tqdm
import oss2
from concurrent.futures import ThreadPoolExecutor, as_completed

def data_of_dir(dir_path: str, contains_flags="", start_time='2023-01-01', end_time='2999-01-01'):
    """
    Scan a directory for data files filtered by keyword flag(s) and a time range.

    :param dir_path: directory to scan
    :param contains_flags: keyword filter; a single string or a list of strings
    :param start_time: range start; if it looks like 'YYYY-MM-DD' the scan matches
                       full dates embedded in file names, otherwise 'YYYY-MM' months
    :param end_time: range end; the range is inclusive on both sides
    :return: sorted list of matching file paths (per-flag results concatenated
             when a list of flags is given); None for unsupported flag types
    """
    def _fetch_filenams(dir_path: str, contain_flag, start_date=None, end_date=None):
        file_paths = []
        contain_flag = contain_flag or ""
        date_pattern = re.compile(r"\d{4}-\d{2}-\d{2}")
        month_pattern = re.compile(r"\d{4}-\d{2}")
        # Decide the granularity once: date-level if start_date itself is a date,
        # otherwise month-level.
        use_date = start_date is not None and date_pattern.search(start_date) is not None
        for file_name in os.listdir(dir_path):
            if contain_flag not in file_name:
                continue
            if not file_name.endswith(('.fth', '.pqt', '.parquet', '.csv', '.xlsx', '.pickle', '.pkl')):
                continue
            if start_date is None:
                file_paths.append(os.path.join(dir_path, file_name))
                continue
            match = (date_pattern if use_date else month_pattern).search(file_name)
            if match is None:
                # BUG FIX: previously match.group() raised AttributeError for
                # file names that contain no date; such files are now skipped.
                continue
            # Zero-padded ISO dates compare correctly as plain strings.
            if start_date <= match.group() <= end_date:
                file_paths.append(os.path.join(dir_path, file_name))
        file_paths.sort()
        return file_paths

    if isinstance(contains_flags, str) or contains_flags is None:
        return _fetch_filenams(dir_path, contains_flags, start_time, end_time)
    elif isinstance(contains_flags, list):
        # BUG FIX: start from [] so an empty flag list yields [] instead of None.
        file_names = []
        for contains_flag in contains_flags:
            file_names += _fetch_filenams(dir_path, contains_flag, start_time, end_time)
        return file_names
    # Unsupported flag type: preserve the original implicit-None behavior.
    return None

def batch_load_data(file_paths, load_function=pd.read_parquet):
    """
    Load several data files and concatenate them into a single DataFrame.

    Files that fail to load are reported and skipped.

    :param file_paths: iterable of file paths to load
    :param load_function: loader applied to each path (default: pd.read_parquet)
    :return: concatenated DataFrame, or None if no file loaded successfully
    """
    frames = []
    for file_path in tqdm(file_paths):
        try:
            frames.append(load_function(file_path))
        except Exception:
            print(f"文件加载异常:{file_path},跳过该文件\n{traceback.format_exc()}")
    if not frames:
        return None
    # PERF FIX: a single concat avoids the quadratic copy cost of calling
    # pd.concat inside the loop once per file.
    return pd.concat(frames)

def pase_json_dict(df, json_column, columns):
    """
    Parse a JSON-string column and extract selected fields into new columns.

    :param df: DataFrame to enrich (modified in place and returned)
    :param json_column: name of the column holding JSON strings
    :param columns: field path(s) in '$.a.b' style; a string or a list of strings
    :return: df with the extracted column(s) appended
    """
    def _query_dict(data, key):
        # Walk a '$.a.b.c' style path through nested dicts; None if any hop is missing.
        result = data
        for node in key.strip("$.").split("."):
            if isinstance(result, dict) and node in result:
                result = result[node]
            else:
                return None
        return result

    def _query_json(json_str, keys):
        try:
            obj = json.loads(json_str)
            if isinstance(keys, str) and len(keys) > 0:
                return _query_dict(obj, keys)
            if isinstance(keys, list) and len(keys) > 0:
                return tuple(_query_dict(obj, query) for query in keys)
        except Exception as e:
            # BUG FIX: message previously said '医生' (doctor), a typo for '异常'.
            print(f'解析发生异常:{e}')
            # BUG FIX: for a list of keys, return one None per key so that
            # result_type='expand' stays column-aligned even on bad JSON.
            if isinstance(keys, list):
                return tuple([None] * len(keys))
            return None

    df[columns] = df.apply(lambda x: _query_json(x[json_column], columns), axis=1, result_type='expand')
    return df


def parse_json_list(df, json_column, id_column, retain_column=None):
    """
    Flatten a column of JSON-array strings into one row per array element.

    :param df: DataFrame to flatten
    :param json_column: column holding a JSON array string per row
    :param id_column: unique key column, copied onto every flattened row so
                      failures and external joins can be traced to the source row
    :param retain_column: extra column name (str) or names (list) to carry over
    :return: new DataFrame with one row per JSON array element
    """
    new_rows = []
    for _, row in df.iterrows():
        json_data_str = row[json_column]
        id_column_value = row[id_column]
        if not isinstance(json_data_str, str):
            # Non-string cells (NaN, None, already-parsed objects) are skipped.
            continue
        try:
            for item in json.loads(json_data_str):
                new_row = item
                new_row[id_column] = id_column_value
                if isinstance(retain_column, str) and len(retain_column) > 0:
                    new_row[retain_column] = row[retain_column]
                elif isinstance(retain_column, list) and len(retain_column) > 0:
                    for column in retain_column:
                        new_row[column] = row[column]
                new_rows.append(new_row)
        except Exception as e:
            # BUG FIX: log the failing row's id *value*; previously only the
            # column name was printed, which cannot locate the bad row.
            print(f"异常数据 {id_column}={id_column_value}:{e}")
            continue
    return pd.DataFrame(new_rows)


def fetch_app_ginfo(package_list, retries=3):
    """
    Scrape Google Play for app description, category, update date and downloads.

    :param package_list: list of package ids to fetch
    :param retries: attempts per package before recording an empty result
    :return: DataFrame with columns package_id, desc, gcate, update, downloads
             (fields stay None when the page or element is missing)
    """
    from bs4 import BeautifulSoup
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36"}
    tmp_arr = []
    for package_id in tqdm(package_list):
        url = f'https://play.google.com/store/apps/details?id={package_id}'
        rs = {'package_id': package_id, 'desc': None, 'gcate': None, 'update': None, 'downloads': None}
        i = retries
        while i > 0:
            try:
                response = requests.get(url, headers=headers, timeout=10)
                if response.status_code == 200:
                    soup = BeautifulSoup(response.content, 'html.parser')
                    div_desc = soup.find('div', class_='bARER', attrs={'data-g-id': 'description'})
                    if div_desc:
                        rs['desc'] = div_desc.get_text()

                    div_update = soup.find('div', class_='xg1aie')
                    if div_update:
                        rs['update'] = div_update.get_text()

                    div_downloads = soup.find('div', class_='w7Iutd')
                    if div_downloads:
                        # The stats strip holds several labelled cells; pick the
                        # one whose label is exactly 'Downloads'.
                        class_divs = div_downloads.find_all('div', class_='wVqUob')
                        for class_div in class_divs:
                            downloads_tag = class_div.find('div', class_='g1rdde')
                            if (downloads_tag is not None) and (downloads_tag.get_text() == 'Downloads'):
                                download_num_div = class_div.find('div', class_='ClM7O')
                                if download_num_div:
                                    rs['downloads'] = download_num_div.get_text()

                    div_cate_tag = soup.find('div', class_='Uc6QCc')
                    if div_cate_tag:
                        cate_link = div_cate_tag.find(
                            lambda tag: tag.has_attr('jsname') and tag['jsname'] == 'hSRGPd' \
                                        and 'category' in (tag.get('href') or ''))
                        # BUG FIX: guard against a missing category link; the old
                        # chained .get('href') raised AttributeError on None and
                        # burned a full retry on an already-fetched page.
                        if cate_link is not None:
                            rs['gcate'] = cate_link.get('href').split('/')[-1]
                    tmp_arr.append(rs)
                    break
                elif response.status_code == 404:
                    # App page does not exist: record the empty result, no retry.
                    tmp_arr.append(rs)
                    break
                else:
                    # BUG FIX: other statuses (429/5xx) previously looped forever
                    # without decrementing the retry counter or sleeping; raise so
                    # the except branch applies the normal retry/backoff path.
                    raise RuntimeError(f"unexpected status {response.status_code}")
            except Exception:
                i = i - 1
                time.sleep(3)
                if i == 0:
                    print(f"解析{package_id}爬虫信息异常，重试3次依然含有异常情况,\n{traceback.format_exc()}")
                    tmp_arr.append(rs)
                continue
    return pd.DataFrame(tmp_arr)
