from utils.resolove import *
import pandas as pd
import numpy as np
import time
import random
from tqdm import tqdm
import requests
from fake_useragent import UserAgent
# Shared generator of randomized User-Agent strings; `ua.random` is read
# for the header of every outgoing request below.
ua = UserAgent()

def try_craw_info(fund_code, try_cnt):
    """
    Fetch and parse one fund's holdings data, retrying on failure.

    @param fund_code: fund code string; only fund_code[1:] is sent to the
                      server (the leading character is stripped)
    @param try_cnt: 1-based attempt counter; gives up after 5 attempts
    @return: list of parsed position records; empty list when all retries fail
    """
    if try_cnt > 5:
        # Give up. Return an empty list (not the original `None, None` tuple)
        # so callers can extend() the result safely.
        return []
    try:
        # Crawl the holdings page; the random "rt" query param busts caches.
        position_data_url = "http://fundf10.eastmoney.com/FundArchivesDatas.aspx?type=jjcc&code=" + \
                            str(fund_code[1:]) + "&topline=10&year=&month=&rt=" + str(random.uniform(0, 1))
        print('第 {0} 次尝试，正在爬取基金 {1} 的持仓情况中...'.format(try_cnt, fund_code[1:]))
        response_data = requests.get(url=position_data_url, headers={'User-Agent': ua.random}, timeout=10)
        # BUG FIX: the original never assigned `fund_positions_data` on the
        # success path (the response was fetched but never parsed), which
        # raised NameError at the return. Parse the response here.
        fund_positions_data = resolve_position_info(fund_code[1:], response_data.text)
    except Exception:
        # Back off longer on each successive failure, then retry recursively.
        time.sleep(random.randint(2 * try_cnt, 4 * try_cnt))
        print("{0} 基金数据爬取失败，请注意！".format(str(fund_code[1:])))
        fund_positions_data = try_craw_info(fund_code, try_cnt + 1)

    return fund_positions_data


def get_position_data(data, rank):
    """
    Select the top `rank` funds by 1-year return and crawl each fund's holdings.

    @param data: DataFrame of fund listings; must contain columns '近1年'
                 (1-year return, possibly empty strings) and '基金代码'
    @param rank: number of top-ranked funds to crawl
    @return: DataFrame built from all parsed position records
    """
    """筛选Top数据"""
    # Treat exact empty strings as missing so dropna() removes them.
    # (Plain replace instead of `regex=True`: an empty regex pattern matches
    # inside every string. `np.nan` instead of `np.NaN`, removed in NumPy 2.0.)
    data = data.replace('', np.nan)
    # .copy() so the column assignment below writes to a real frame,
    # not a view of `data` (avoids SettingWithCopyWarning / lost writes).
    data_notna = data.dropna(subset=['近1年']).copy()
    data_notna['近1年'] = data_notna['近1年'].astype(float)
    data_sort = data_notna.sort_values(by='近1年', ascending=False)
    data_sort.reset_index(inplace=True)
    # head(rank) is equivalent to .loc[0:rank-1] after reset_index, but is
    # also correct for rank == 0 (label slice 0:-1 would select everything).
    data_rank = data_sort.head(rank)

    # Crawl each selected fund's holdings page.
    position_data = []
    error_funds_list = []
    print("开始爬取基金的持仓情况中...")
    for row_index, data_row in tqdm(data_rank.iterrows()):
        fund_code = str(data_row['基金代码'])
        try:
            """爬取页面，获取该基金的持仓数据"""
            # Only fund_code[1:] is sent; the random "rt" param busts caches.
            position_data_url = "http://fundf10.eastmoney.com/FundArchivesDatas.aspx?type=jjcc&code=" + \
                                str(fund_code[1:]) + "&topline=10&year=&month=&rt=" + str(random.uniform(0, 1))
            # 解析基金的持仓情况
            response_data = requests.get(url=position_data_url, headers={'User-Agent': ua.random}, timeout=10)
            fund_positions_data = resolve_position_info(fund_code[1:], response_data.text)
            # 保存数据 (extend directly; the identity generator added nothing)
            position_data.extend(fund_positions_data)
        except Exception:
            # BUG FIX: this handler was commented out, so any request failure
            # crashed the whole run and `error_funds_list` stayed empty,
            # leaving the retry loop below dead. Record the code for retry.
            error_funds_list.append(fund_code)
            print("{0} 数据爬取失败，稍后会进行重试，请注意！".format(str(fund_code[1:])))
        # 随机休眠2-4 秒 — be polite to the server.
        time.sleep(random.randint(2, 4))

    """爬取失败的进行重试"""
    for fund_info in error_funds_list:
        position_data_try = try_craw_info(fund_info, 1)
        position_data.extend(position_data_try)

    df_position_data = pd.DataFrame(position_data)
    print("基金的持仓数据爬取完成!")
    return df_position_data
