import json
import os
from collections import defaultdict

import pandas as pd
import requests
from bs4 import BeautifulSoup


def get_stock_info(page: int = 1) -> str:
    """Fetch one page of the HS stock listing from ifeng and return its HTML.

    :param page: 1-based page number of the listing to fetch.
    :return: decoded HTML text of the listing page.
    :raises requests.HTTPError: if the server answers with an error status.
    """
    stock_info_base_url = 'http://app.finance.ifeng.com/list/stock.php?t=hs&f=symbol&o=asc&p={page}'
    stock_info_url = stock_info_base_url.format(page=page)
    stock_info = requests.get(stock_info_url, timeout=30)
    # Fail fast on HTTP errors instead of handing an error page's HTML
    # to the downstream parsers as if it were listing data.
    stock_info.raise_for_status()
    # The page does not declare its charset reliably; trust the detected one.
    stock_info.encoding = stock_info.apparent_encoding
    return stock_info.text

# Parse stock info.
def parse_stock_info(_stock_info, _row: int = 1):
    """Parse one data row of the stock-listing HTML into a dict.

    :param _stock_info: the HTML page of stock info (see get_stock_info).
    :param _row: index of the <tr> data row to extract; row 0 is the
        header row, so the default 1 is the first data row.
    :return: dict mapping each column header to a one-element list
        holding that row's cell text.
    """
    soup = BeautifulSoup(_stock_info, 'html.parser')
    # Each <tr>'s text is split on newlines; the split leaves one leading
    # and one trailing '' element, which [1:-1] strips below.
    # NOTE(review): the original also filtered `if stock != ['']`, which
    # compared a bs4 Tag against [''] and therefore never excluded
    # anything — that no-op filter is dropped here.
    rows = [tr.text.split("\n") for tr in soup.find_all('tr')]
    # The first row holds the column headers.
    headers = rows[0][1:-1]
    cells = rows[_row][1:-1]
    parsed = defaultdict(list)
    for head, cell in zip(headers, cells):
        parsed[head].append(cell)
    return parsed


# def create_csv(out: str = './stock_info.csv'):
#     with open(out, 'w', encoding='utf-8') as f:
#         f.write("000651,格力电器,33.81,4.84%,1.56,729602手,243720万,32.70,32.25,32.40,33.95")
#         f.write(os.linesep)
#         pass
#
#
# # 保存股票信息
# # TODO: CHANGE THIS UGLY CODE
# def save_stock_info(_stock_info, out: str = './stock_info.csv', mode='w'):
#     # write it into a csv file
#     df = pd.DataFrame(_stock_info)
#     # cols=_stock_info.values()
#     # df[cols] = df[cols].apply(pd.to_numeric, index=False, errors='coerce')
#     df.to_csv(out, index=False, mode=mode, header=False)
#     pass

# def to_fund_table_vo(_parsed_stock_info):
#
#     return json.dumps({'code': _parsed_stock_info[0],'name':_parsed_stock_info[1],'netValue':_parsed_stock_info[2],'dailyGrowth':_parsed_stock_info[3]})

def get_fund_list(_stock_info):
    """Extract the data rows from a stock-listing HTML page.

    :param _stock_info: HTML text of a listing page (see get_stock_info).
    :return: list of rows, each a list of cell strings produced by
        splitting a <tr>'s text on newlines; the header row and the two
        trailing non-data rows are dropped.
    """
    soup = BeautifulSoup(_stock_info, 'html.parser')
    # NOTE(review): the original filtered `if stock != ['']`, which
    # compared a bs4 Tag against [''] and never excluded anything; the
    # no-op filter is dropped here.
    rows = [tr.text.split("\n") for tr in soup.find_all('tr')]
    # Drop the header row (index 0) and the last two footer/pager rows.
    return rows[1:-2]

def to_fund_table_vos(_stock_info):
    """Convert parsed listing rows into fund-table value objects.

    :param _stock_info: iterable of rows; each row is a sequence whose
        indices 1-4 are code, name, net value and daily growth (index 0
        is the empty string left over from the newline split).
    :return: list of dicts with keys 'code', 'name', 'netValue' and
        'dailyGrowth'.
    """
    # Comprehension replaces the original append loop; same output.
    return [
        {
            'code': stock[1],
            'name': stock[2],
            'netValue': stock[3],
            'dailyGrowth': stock[4],
        }
        for stock in _stock_info
    ]

def get_all_funds(start_page: int = 200, end_page: int = 201):
    """Fetch and parse every listing page in [start_page, end_page).

    :param start_page: first page to fetch. The default 200 preserves the
        original hard-coded `range(200, 201)` single-page behavior —
        presumably leftover debugging; TODO confirm the intended range.
    :param end_page: one past the last page to fetch.
    :return: list of fund-table dicts accumulated from all fetched pages.
    """
    fund_table_vos = []
    for page in range(start_page, end_page):
        stock_info = get_stock_info(page=page)
        fund_table_vos.extend(to_fund_table_vos(get_fund_list(stock_info)))
    return fund_table_vos

def get_paged_funds(page: int):
    """Fetch a single listing page and return its fund-table dicts.

    :param page: page number passed straight through to get_stock_info.
    :return: list of dicts with keys 'code', 'name', 'netValue',
        'dailyGrowth' for each row on that page.
    """
    html = get_stock_info(page)
    return to_fund_table_vos(get_fund_list(html))

if __name__ == '__main__':
    # Fetch the default page range and dump the parsed rows to stdout.
    funds = get_all_funds()
    print(funds)