import requests
import bs4
from bs4 import BeautifulSoup
import lxml
import csv
import random
import re
import pandas as pd
from settings import BASE_URL, LIST_URL, POSTING_URL, headers
import time
# BASE_URL = "https://guba.eastmoney.com/"
# LIST_URL =  BASE_URL + "o/list,"
# POSTING_URL = BASE_URL + "news,"

# headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
#                         'Chrome/51.0.2704.63 Safari/537.36'}

def get_posting_list(stock_id, page):
    """
    Return the URL of page `page` of the comment list for stock `stock_id`.

    E.g. get_posting_list(600000, 2) -> LIST_URL + "600000_2.html"
    """
    # f-string instead of chained str()/+ concatenation.
    return f"{LIST_URL}{stock_id}_{page}.html"

def get_posting_detail(stock_id, post_id):
    """
    Return the URL of the detail page of post `post_id` under stock `stock_id`.

    E.g. get_posting_detail(600000, 123) -> POSTING_URL + "600000,123.html"
    """
    return f"{POSTING_URL}{stock_id},{post_id}.html"

def parse_article_meta(soup):
    """
    Parse the meta section of an article detail page.

    Returns a tuple (auth_name, news_time) of bs4 tags: the author-name
    anchor and the <span class="txt"> that contains the publish date
    (identified as the first one containing a digit). Either element may
    be None when the page does not match the expected layout.
    """
    article_meta = soup.find('div', {
        'class': 'article-meta'
    })
    # BUG FIX: the original wrapped the lookups in a bare `except: pass`,
    # so a missing meta block left auth_name/txt_list unbound and the
    # function crashed with NameError. Guard explicitly instead.
    if article_meta is None:
        return None, None
    auth_name = article_meta.find('a', {
        "class": "auth name"
    })
    txt_list = article_meta.find_all('span', {
        "class": "txt"
    })
    news_time = None
    for txt in txt_list:
        # The date span is the one that contains a digit.
        if re.search(r"\d", txt.text):
            news_time = txt
            break
    return auth_name, news_time
    
# Rows accumulated across stock_crawl() calls; each row matches the
# DataFrame columns written to result.xlsx (url, type, title, author, ...).
results = []
def stock_crawl(url):
    """
    Crawl one posting-list page of a stock, fetch every post's detail page,
    append the extracted rows to the module-level `results`, and rewrite
    result.xlsx with all rows collected so far.

    Posts that cannot be fully parsed are logged to exception.txt.
    """
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.content, 'lxml')
    stock_header = soup.find("div", {
        'id': 'stockheader'
    }).find_all('a')
    stock_name = stock_header[0].text

    post_list = soup.find("div", {
        'class': 'all hs_list'
    })
    for post in post_list:
        # Iterating the container yields NavigableStrings (whitespace)
        # interleaved with the actual row tags; keep only the tags.
        if isinstance(post, bs4.element.Tag):
            post_info = post.find_all('span')
            if len(post_info) >= 4:
                reading_num = post_info[0].text
                if reading_num == '阅读':
                    # Table header row, not a real post.
                    continue
                comment_num = post_info[1].text
                post_type = "Regular"
                em_tags = post_info[2].find_all('em')
                if em_tags:
                    post_type = em_tags[0].text
                a_tags = post_info[2].find_all('a')
                # BUG FIX: detail_url was previously unbound when the row had
                # no <a href>, crashing at the exception-log write below.
                detail_url = None
                news_title, author_name, news_text, news_time = None, None, None, None
                for a_tag in a_tags:
                    if 'href' in a_tag.attrs:
                        detail_url = a_tag['href']
                        if detail_url.startswith("//caifuhao"):
                            post_type = "caifuhao"
                            detail_url = "https:" + detail_url
                            news_title, author_name, news_text, news_time = article_crawl(detail_url)
                        else:
                            post_type = "guba"
                            detail_url = "https://guba.eastmoney.com" + detail_url
                            news_title, author_name, news_text, news_time = posting_crawl(detail_url)

                if not news_title or not author_name or not news_text or not news_time:
                    # Record unparsable posts for later inspection.
                    if detail_url:
                        with open('exception.txt', 'a') as f:
                            f.write(detail_url + "\n")
                    continue
                results.append([detail_url, post_type, news_title.text, author_name.text,
                                news_text.text, news_time.text, reading_num, comment_num])
    df = pd.DataFrame(results,
                      columns=['url', 'type', 'Title', 'author', 'Text', 'Time', 'Reading Amount', 'Comment Amount'])
    # BUG FIX: mode='a' raises FileNotFoundError when result.xlsx does not
    # exist yet (i.e. on the very first run); fall back to 'w' in that case.
    import os
    if os.path.exists('result.xlsx'):
        writer_kwargs = {'mode': 'a', 'if_sheet_exists': 'replace'}
    else:
        writer_kwargs = {'mode': 'w'}
    with pd.ExcelWriter('result.xlsx', **writer_kwargs) as writer:
        df.to_excel(writer)
    print("Stock Name: ", stock_name)
    print(df)

def article_crawl(url):
    """
    Fetch a caifuhao article page and extract its main elements.

    Returns a tuple (news_title, author_name, news_text, news_time) of bs4
    tags; any element may be None when the page deviates from the expected
    layout (e.g. the URL is not actually an article).
    """
    print("posting url: ", url)
    page = requests.get(url, headers=headers)
    soup = BeautifulSoup(page.content, 'lxml')

    news_title = soup.find("h1", {"class": "article-title"})
    # Author name and publish time both live in the article-meta div.
    author_name, news_time = parse_article_meta(soup)
    news_text = soup.find("div", {"class": "article-body"})

    return news_title, author_name, news_text, news_time


def posting_crawl(url):
    """
    Fetch a regular guba posting page and extract its main elements.

    Returns a tuple (news_title, author_name, news_text, news_time) of bs4
    tags; any element may be None when the page deviates from the expected
    layout (e.g. the URL is not actually a posting).
    """
    print("posting url: ", url)
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.content, 'lxml')
    news_title = soup.find("div", {
        'class': 'newstitle'
    })
    news_text = soup.find("div", {
        'class': 'newstext'
    })
    author_name = soup.find("a", {
        'class': 'name'
    })
    news_time = soup.find("div", {
        'class': 'time'
    })
    # Removed the unused `like_num` lookup: its only consumer was a
    # commented-out debug print, so it cost a DOM search for nothing.
    return news_title, author_name, news_text, news_time
if __name__ == "__main__":
    # Crawl the first 99 list pages for stock 600000.
    for page in range(1, 100):
        list_url = get_posting_list(600000, page)
        stock_crawl(list_url)
        # Randomized polite pause between pages so we don't hammer the
        # server with 99 back-to-back requests (time/random were imported
        # at the top of the file for exactly this purpose but never used).
        time.sleep(random.uniform(1.0, 3.0))
    # article_crawl("https://caifuhao.eastmoney.com/news/20230218085044079701540?from=guba&name=5rWm5Y%2BR6ZO26KGM5ZCn&gubaurl=aHR0cDovL2d1YmEuZWFzdG1vbmV5LmNvbS9saXN0LDYwMDAwMC5odG1s")