from settings import STOCK_ID_NAME_JSON
from tools import *
# Name -> stock-id lookup table built from the configured JSON mapping.
id_maps = Stock_id(STOCK_ID_NAME_JSON)

# NOTE(review): csv_path targets one hard-coded stock and is never used
# below — posting_crawl() resolves its own CSV paths; presumably kept
# for manual/debug runs. Confirm before removing.
csv_path = get_csv_path("襄阳轴承.csv")

# posting_lists = Stock_csv(csv_path)

def get_next_comment_page_url(posting_url, page):
    """Return the URL of a posting's given comment page.

    Page 1 is the posting URL itself; later pages insert ``_{page}``
    before the file extension, e.g.
    ``.../news,000977,1325409452.html`` -> ``.../news,000977,1325409452_2.html``.

    Args:
        posting_url: posting URL ending in an extension such as ``.html``.
        page: 1-based page number; int or numeric str are both accepted
            (the crawl loop passes the counter back as a str).

    Returns:
        The URL for the requested comment page.
    """
    page = int(page)  # tolerate "2" as well as 2
    if page == 1:
        return posting_url
    # Split the extension off once from the right and re-attach it, so the
    # URL's actual extension is preserved (the old code re-appended a
    # hard-coded ".html" whatever the original suffix was).
    stem, ext = posting_url.rsplit(".", 1)
    return f"{stem}_{page}.{ext}"

def save_posting_detail(stock_id, posting_url, posting_type):
    """Crawl one posting (main body + all comment pages) and save it as JSON.

    Output goes to ``./posting_detail/{stock_id}/{posting_id}.json``; a
    posting whose JSON file already exists is skipped, which makes the
    crawl resumable.

    Args:
        stock_id: stock identifier, used as the output sub-directory name.
        posting_url: e.g. "https://guba.eastmoney.com/news,000977,1325409452.html"
        posting_type: only "news" is implemented; "caifuhao" is a stub.

    Returns:
        None. Returns early when the posting is already saved or when the
        main body could not be fetched.
    """
    Log("")
    save_root = f"./posting_detail/{stock_id}"
    if not os.path.exists(save_root):
        Log(f"{save_root} not exists")
        Log(f"create {save_root}")
        # makedirs also creates ./posting_detail when it is missing, and
        # exist_ok avoids a race between concurrent crawls (os.mkdir would
        # raise in both situations).
        os.makedirs(save_root, exist_ok=True)
    if posting_type == "news":
        # The posting id is the last comma-separated URL field, minus the
        # file extension.
        posting_id = posting_url.split(',')[-1].split('.')[0]
        Log(f"crawling posting {posting_id}, {posting_url}")
        save_path = os.path.join(save_root, f"{posting_id}.json")
        if os.path.exists(save_path):
            Log(f"skip posting {posting_id}, {posting_url}")
            return None
        Log(f"to save posting information in {save_path}")
        main_info = get_posting_maininfo(posting_url, posting_type)
        if not main_info:
            return None
        Log(f"crawling main body done")
        page = 1
        comment_list = []
        hasnext = True
        while hasnext:
            page_comment_list, page, hasnext = get_posting_comments(
                get_next_comment_page_url(posting_url, page), posting_type)
            comment_list.extend(page_comment_list)
            # Log the page that was just crawled, *before* advancing the
            # counter (the old code logged the next page's number).
            Log(f"crawling comment page {page} done")
            page = str(int(page) + 1)
        Log(f"crawling comments done")
        post = posting_info(main_info.author, main_info.title, main_info.create_time, main_info.main_body, comment_list)
        post.to_json(save_path)
        Log(f"results saved to {save_path}")
    elif posting_type == "caifuhao":
        # "caifuhao" postings are not yet supported — deliberate no-op.
        pass
    else:
        Log(f"uncompleted function for posting type {posting_type}")

def posting_crawl(path):
    """Crawl every posting listed in one stock's posting-list CSV file.

    The stock name is the CSV file name without its extension; it is
    mapped to a stock id through the module-level ``id_maps`` table, and
    each (link, type) row is handed to ``save_posting_detail``.

    Args:
        path: path to a posting-list CSV with 'link' and 'type' columns.
    """
    # Stock_csv reads the file itself — the old `with open(path)` wrapper
    # created a handle that was never used, so it is removed.
    stock = Stock_csv(path)
    # os.path handles the platform's separators; the old path.split('\\')
    # only worked for Windows-style paths.
    stock_name = os.path.splitext(os.path.basename(path))[0]
    identity = id_maps.get_id_by_name(stock_name)
    for link, kind in zip(stock.df['link'], stock.df['type']):
        save_posting_detail(identity, link, kind)

if __name__ == '__main__':
    # Drive a full crawl: one posting-list CSV per stock.
    for stock_csv in find_csv_files(POSTING_LIST_ROOT):
        posting_crawl(stock_csv)