from tools import get_posting_list_info, Stock_id, build_json
from tools import Log, get_status, update_status, check_date, get_posting_list_url
from settings import STOCK_ID_NAME_JSON, STATUS_INFO_PATH, headers, BASE_URL, POSTING_LIST_URL
import pandas as pd
import json
import os
import requests
import random
from requests.compat import urljoin
from bs4 import BeautifulSoup
# Module-level stock-id <-> display-name mapping, loaded once at import time
# from the configured JSON file.
id_maps = Stock_id(STOCK_ID_NAME_JSON)
ids = id_maps.get_ids()  # NOTE(review): `ids` is never referenced in this file

def build_listinfo_csv(stock_id, root_dir=None):
    """
    Crawl the guba posting list of one stock, page by page, appending batches
    of rows to a CSV and recording progress so an interrupted crawl resumes.

    Parameters
    ----------
    stock_id : str
        Id of the stock whose posting list is crawled.
    root_dir : str | None
        Directory for the output CSV. When None, the file is named after the
        stock's display name in the current directory; otherwise it is
        ``{root_dir}/{stock_id}.csv``.
    """
    dic_array = []
    # Page to start crawling from.
    page = 1
    # Check the recorded crawl status for this stock.
    cur_info = get_status(stock_id)
    if cur_info:
        # Already (partially) crawled: resume from the recorded page.
        page = int(cur_info[0])
        status = cur_info[1]
        Log(f"-----------{stock_id} cur_page:{page}, status:{status}-----------")
        if status == "done":
            # Crawl already finished for this stock.
            return
    else:
        # Never crawled before: initialise the status record.
        update_status(stock_id, 1, "undone")
        Log(f"-----------{stock_id} cur_page:1, status:undone-----------")

    Log(f"-----------start crawling {stock_id}-----------")
    cur_year = 2023
    is_first = True

    def _flush(rows):
        """Append *rows* to the stock's CSV (header only on the first write)."""
        df = pd.DataFrame(rows)
        if root_dir is None:
            df.to_csv(f"{id_maps.get_name_by_id(stock_id)}.csv", mode='a', header=is_first, encoding="utf-8_sig")
        else:
            df.to_csv(f"{root_dir}/{stock_id}.csv", mode='a', header=is_first, encoding="utf-8_sig")

    # Stop once the last posting on a page dates from before 2020.
    while cur_year >= 2020:
        Log(f"crawling page {page}")
        page_info = get_posting_list_api(stock_id, page)
        Log(f"{len(page_info)} postings")
        if not page_info:
            # Empty page: nothing left to crawl (guards page_info[-1] below).
            break
        cur_date = page_info[-1]['最后更新时间']
        last_url = page_info[-1]['帖子链接']
        cur_year = int(check_date(last_url))
        Log(f"update to {cur_year}-{cur_date}")
        dic_array += page_info
        page += 1

        if len(dic_array) > 250:
            Log(f"update {stock_id}.csv")
            # Persist progress before writing so a crash resumes at this page.
            update_status(stock_id, page, "undone")
            _flush(dic_array)
            dic_array = []
            is_first = False

    # BUGFIX: flush the final partial batch (<= 250 rows); the original
    # dropped whatever was left in dic_array when the loop ended.
    if dic_array:
        Log(f"update {stock_id}.csv")
        _flush(dic_array)
    # Mark the whole stock as finished.
    update_status(stock_id, page, "done")

def get_stock_top_100():
    """Crawl the posting list of every stock listed in stock_id.json,
    skipping any stock whose CSV already exists in the working directory."""
    existing = os.listdir()
    with open("stock_id.json", 'r', encoding="utf-8") as fp:
        id_to_name = json.load(fp)
    for stock_id, name in id_to_name.items():
        if f"{name}.csv" in existing:
            Log(f"{name}.csv already exists")
        else:
            build_listinfo_csv(stock_id)


def get_posting_url_from_list_page(suburl:str):
    """
    Build the absolute URL of a posting linked from a list page such as
    http://guba.eastmoney.com/list,002762.html.
    Links starting with /news and with //caifuhao are handled separately;
    anything else is rejected.
    """
    if suburl.startswith('/news'):
        return urljoin(BASE_URL, suburl)
    if suburl.startswith('//caifuhao'):
        # Protocol-relative link: prepend the scheme only.
        return urljoin('http:', suburl)
    raise NotImplementedError(f"get_posting_url_from_list_page not implemented")
    
def parse_posting_list_info(posting_list):
    """
    Parse one page worth of posting rows.

    posting_list : list of <tr class="listitem"> tags taken from a list page.
    Returns a list of dicts mapping the Chinese column names to cell values,
    plus absolute links for the posting itself and its author.
    """
    def get_attr(item, attr_name:str):
        """Return the <div class="{attr_name}"> element inside *item*."""
        # BUGFIX: the original passed a SET literal {"class", attr_name}
        # (comma instead of colon). bs4 only matched it by accident via its
        # non-dict attrs fallback; use an explicit class filter dict.
        return item.find('div', {"class": attr_name})

    info_arr = []
    attr_names = ["read", "reply", "title", "author", "update"]
    save_name_dic = {
        "read":'阅读量',
        "reply":'评论量',
        "title":'标题',
        "author":'作者',
        "update": '最后更新时间',
    }
    for posting in posting_list:
        info_dic = {}
        for name in attr_names:
            attr = get_attr(posting, name)
            info_dic[save_name_dic[name]] = attr.text
            if name == "title":
                # The title cell also carries the link to the posting itself.
                info_dic['帖子链接'] = get_posting_url_from_list_page(attr.contents[0]['href'])
            elif name == "author":
                # The author cell links to the author's profile page.
                info_dic['作者链接'] = urljoin(BASE_URL, attr.contents[0]['href'])
        info_arr.append(info_dic)
    return info_arr

def get_url_from_stockid_and_page(stock_id, page):
    """
    Build the URL of page *page* of stock_id's guba posting list.
    Page 1 has no page suffix; later pages use the ",f_<page>" form.
    """
    stock_id = str(stock_id)
    page = str(page)
    if page == "1":
        return f"{POSTING_LIST_URL}{stock_id}.html"
    return f"{POSTING_LIST_URL}{stock_id},f_{page}.html"

def read_proxy_list(filename):
    """Load proxy addresses from *filename*, one address per line."""
    with open(filename, 'r') as fp:
        return fp.read().splitlines()

# Module-level proxy pool, loaded once at import time from http.txt
# (one "host:port" entry per line; raises if the file is missing).
proxy_list = read_proxy_list('http.txt')

def get_posting_list_api(stock_id, page):
    """
    Fetch and parse page *page* of stock_id's guba posting list.

    A proxy is drawn at random from the module-level pool; failed requests
    (network error or non-200 status) are retried with a fresh proxy.

    Returns a list of dicts, one per posting (see parse_posting_list_info).
    Raises RuntimeError when no proxy yields a 200 response.
    """
    global proxy_list
    # Target URL is loop-invariant: build it once.
    target_url = get_url_from_stockid_and_page(stock_id, page)
    # BUGFIX: the original retried forever on a dead pool; bound the attempts.
    max_attempts = 20
    response = None
    for _ in range(max_attempts):
        # Pick the proxy for this attempt.
        proxy = random.choice(proxy_list)
        Log(f"choose {proxy}")
        try:
            # BUGFIX: without a timeout a stalled proxy hangs the crawl
            # forever, and connection errors used to crash it uncaught.
            response = requests.get(target_url, headers=headers,
                                    proxies={'http': proxy}, timeout=10)
        except requests.RequestException as err:
            Log(f"request via {proxy} failed: {err}")
            continue
        if response.status_code == 200:
            break
        response = None
    if response is None:
        raise RuntimeError(f"no working proxy for {target_url}")
    response.encoding = 'utf-8'
    html = response.text
    soup = BeautifulSoup(html, 'html.parser')
    posting_list = soup.select('#mainlist > div > ul > li.defaultlist > table > tbody')[0]
    # BUGFIX: attrs was a set literal {'class', 'listitem'}; use a dict.
    lists = posting_list.find_all('tr', {'class': 'listitem'})
    info_arr = parse_posting_list_info(lists)
    return info_arr

def func(a):
    """Return *a* incremented by one."""
    return 1 + a

if __name__ == "__main__":
    # Read the requested stock ids, one per line; the first line may carry a
    # BOM/prefix, so only its last six characters before the newline are kept.
    stock_ids = []
    with open('required_posting_list.txt', 'r', encoding="utf-8") as f:
        for idx, line in enumerate(f.readlines()):
            stock_ids.append(line[:-1] if idx > 0 else line[-7:-1])
    # NOTE(review): `files` is never used below, but listing the directory
    # also verifies it exists before crawling starts.
    files = os.listdir('./required_posting_lists')
    for sid in stock_ids:
        build_listinfo_csv(sid, root_dir="./required_posting_lists")