from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from bs4 import BeautifulSoup
from settings import BASE_URL
from urllib.parse import urljoin
from settings import BASE_URL, chrome_driver_path, POSTING_LIST_URL, RANK_URL, STOCK_ID_NAME_JSON, POSTING_LIST_ROOT, headers, STATUS_INFO_PATH
import json
import csv
import pandas as pd
import os 
import time
import requests

def get_posting_list_url(stock_id, page):
    if page != "1":
        return POSTING_LIST_URL+stock_id+",f_"+page+".html"
    else:
        return POSTING_LIST_URL+stock_id+",f.html"

def get_driver():
    # 配置Chrome浏览器的选项
    chrome_options = Options()
    chrome_options.add_argument('--headless')  # 无界面模式，可选择是否使用

    # 启动Chrome浏览器驱动
    driver = webdriver.Chrome(service=Service(chrome_driver_path), options=chrome_options)
    return driver

def get_posting_list_info(stock_id, page):
    page = str(page)
    info_dic_list = []
    # 配置Chrome浏览器的选项
    chrome_options = Options()
    chrome_options.add_argument('--headless')  # 无界面模式，可选择是否使用
    chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
    # 启动Chrome浏览器驱动
    driver = webdriver.Chrome(service=Service(chrome_driver_path), options=chrome_options)
    url = get_posting_list_url(stock_id, page)
    # 打开网页
    driver.get(url)

    # 等待页面加载完成
    wait = WebDriverWait(driver, 0.5)
    retry_times = 0
    while retry_times<20:
        try:
            listbody = wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'listbody')))
            break
        except TimeoutError:
            retry_times += 1
            Log(f"crawling page {page} failed, retry: {retry_times} time")
            continue
    # 提取<tbody class="listbody">节点的内容
    table_html = listbody.get_attribute('innerHTML')

    # 关闭浏览器驱动
    driver.quit()
    # 解析<tbody class="listbody">节点的内容
    soup = BeautifulSoup(table_html, 'html.parser')

    # 提取<tr class="listitem">标签
    listitem_tags = soup.find_all('tr', class_='listitem')
    # print(len(listitem_tags))
    for listitem in listitem_tags:
        # 提取阅读量
        read = listitem.find('div', class_='read').text
        
        # 提取评论量
        reply = listitem.find('div', class_='reply').text
        
        # 提取标题和帖子链接
        title = listitem.find('div', class_='title').a.text
        post_link = listitem.find('div', class_='title').a['href']
        # 提取作者和作者主页
        author = listitem.find('div', class_='author').a.text
        author_link = listitem.find('div', class_='author').a['href']
        # 提取最后更新时间
        update = listitem.find('div', class_='update').text
        # cur_date = (int(update.split()[0].split('-')[0]), int(update.split()[0].split('-')[1]))
        # 判断帖子类型
        if post_link.startswith("/news,"):
            post_link = urljoin(BASE_URL, post_link)
            post_type = "news"
        elif post_link.startswith("//caifuhao.eastmoney.com"):
            post_link = urljoin("https:",post_link)
            post_type = "caifuhao"
        else:
            post_link = "Unknown posting type"
            post_type = "Unknown posting type"

        # 判断作者信息
        if not author_link.startswith("//i.eastmoney.com"):
            author_link = "Unknown author type"
        else:
            author_link = urljoin("https:",author_link)
        info_dic = {}
        info_dic['阅读量'] = read
        info_dic['评论量'] = reply
        info_dic['标题'] = title
        info_dic['作者'] = author
        info_dic['最后更新时间'] = update
        info_dic['帖子链接'] = post_link
        info_dic['作者链接'] = author_link
        info_dic['类型'] = post_type
        info_dic_list.append(info_dic)
    return info_dic_list

def get_stock_id_collection():
    """
    如何实现翻页
    """
    # 配置Chrome浏览器的选项
    chrome_options = Options()
    chrome_options.add_argument('--headless')  # 无界面模式，可选择是否使用
    # 启动Chrome浏览器驱动
    driver = webdriver.Chrome(service=Service(chrome_driver_path), options=chrome_options)
    driver.get(RANK_URL)
    stock_name_id = {}
    while True:
        try:
            # 等待页面加载完成
            wait = WebDriverWait(driver, 10)
            listbody = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#rankCont > div.table_cont > div.tablebox > table > tbody')))
            # 提取<tbody class="listbody">节点的内容
            table_html = listbody.get_attribute('innerHTML')    
            soup = BeautifulSoup(table_html, "html.parser")
            listitems = soup.find_all('tr')
            for listitem in listitems:
                stock_id = listitem.find('div', class_='stocktd').a.text
                stock_name = listitem.find('div', class_='nametd_box').a.text
                stock_name_id[stock_id] = stock_name
            next_button = driver.find_element(By.LINK_TEXT, "下一页")
            if not next_button:
                driver.quit()
                break
            else:
                next_button.click()
        except:
            with open("stock_id.json", "w", encoding="utf-8") as f:
                json.dump(stock_name_id, f, ensure_ascii=False)
            break

class Stock_id:
    """
    工具类，获取股票的名称和id信息
    """
    def __init__(self, data_path=STOCK_ID_NAME_JSON):
        with open(data_path, encoding="utf-8") as f:
            self.id_name_dic = json.load(f)
        self.name_id_dic = {}
        for id in self.id_name_dic:
            self.name_id_dic[self.id_name_dic[id]] = id
    
    def __len__(self):
        return len(self.id_name_dic)
    
    def get_name_by_id(self, id):
        return self.id_name_dic[id]
    
    def get_id_by_name(self, name):
        return self.name_id_dic[name]
    
    def get_names(self):
        return self.name_id_dic.keys()
    
    def get_ids(self):
        return self.id_name_dic.keys()

def test_stock_id_class():
    # 指定 JSON 文件路径
    data_path = STOCK_ID_NAME_JSON

    # 创建 Stock_id 实例
    stock_ids = Stock_id(data_path)

    # 测试 get_name_by_id 方法
    id = "000977"
    name = stock_ids.get_name_by_id(id)
    print(f"Stock name for id {id}: {name}")

    # 测试 get_id_by_name 方法
    name = "浪潮信息"
    id = stock_ids.get_id_by_name(name)
    print(f"Stock id for name {name}: {id}")

    # 测试 get_names 方法
    names = stock_ids.get_names()
    print("Stock names:")
    for name in names:
        print(name)

    # 测试 get_ids 方法
    ids = stock_ids.get_ids()
    print("Stock ids:")
    for id in ids:
        print(id)

    # 测试 __len__ 方法
    length = len(stock_ids)
    print(f"Total number of stock ids: {length}")

def build_json(json_path, dic:dict):
    try:
        with open(json_path, 'w', encoding="utf-8") as f:
            json.dump(dic, ensure_ascii=False)
    except:
        print(f"Build json file {json} failed\n")
        raise RuntimeError()

def get_csv_path(name):
    return os.path.join(POSTING_LIST_ROOT, name)

class Stock_csv:
    def __init__(self, csv_path) -> None:
        self.df = pd.read_csv(csv_path, encoding="utf-8")
        self.df = self.df.iloc[:, 1:]
        self.df.columns = ['nread', 'ncomment', 'title', 'author', 'update', 'link', 'linkauthor', 'type']
    
    def __len__(self):
        return len(self.df)

    def get_row(self, row_idx):
        return self.df.iloc[row_idx]

class User:
    def __init__(self, name:str, link:str) -> None:
        self.name = name
        self.link = link
        self.info_dic = {"name":name, "link":link}
    def __str__(self):
        return f"name: {self.name}, link: {self.link}"

class comment_info:
    def __init__(self, floor_owner:User, pubtime:str, title:str, sons=None) -> None:
        self.floor_owner = floor_owner
        self.pubtime = pubtime
        self.title = title
        self.sons = sons
        self.info_dic = {
            'floor_owner': self.floor_owner.info_dic,
            'pubtime': self.pubtime,
            'title': self.title,
            'sons': [son.info_dic for son in self.sons]
        }

    
class posting_main_info:
    """
    一个帖子的标题，作者，创建时间，主要内容
    """
    def __init__(self, author:User , title:str, create_time:str, main_body:str) -> None:
        self.author = author
        self.title = title
        self.create_time = create_time
        self.main_body = main_body


class posting_info:
    def __init__(self, author:User , title:str, create_time:str, main_body:str, comment_list) -> None:
        self.author = author
        self.title = title
        self.create_time = create_time
        self.main_body = main_body
        self.comment_list = comment_list
        self.info_dic = {
            'author': self.author.info_dic,
            'title': self.title,
            'create_time': self.create_time,
            'main_body': self.main_body,
            'comment_list': [comment.info_dic for comment in self.comment_list]
        }
    def to_json(self, path):
        with open(path, "w", encoding="utf-8") as f:
            json.dump(self.info_dic, f, ensure_ascii=False)
        
def get_posting_maininfo(posting_url, posting_type):
    # 配置Chrome浏览器的选项
    chrome_options = Options()
    chrome_options.add_argument('--headless')  # 无界面模式，可选择是否使用
    chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
    # 启动Chrome浏览器驱动
    driver = webdriver.Chrome(service=Service(chrome_driver_path), options=chrome_options)
    # 打开网页
    driver.get(posting_url)
    driver.implicitly_wait(5)
    try:
        if driver.current_url != posting_url:
            Log(f"Empty url: {posting_url}")
            return None
    except:
        Log(f"Empty url: {posting_url}")
        return None
    if posting_type == "news":
        posting_title = driver.find_element(By.CSS_SELECTOR, "#newscontent > div:nth-child(2) > div.newstitle")
        create_time = driver.find_element(By.CSS_SELECTOR, "#newscontent > div:nth-child(2) > div.newsauthor > div > div.time")
        link_posting_author = driver.find_element(By.CSS_SELECTOR, "#newscontent > div:nth-child(2) > div.newsauthor > div > a.name").get_attribute('href')
        posting_author = driver.find_element(By.CSS_SELECTOR, "#newscontent > div:nth-child(2) > div.newsauthor > div > a.name").text
        link_posting_author = urljoin(BASE_URL, link_posting_author)
        if not link_posting_author.startswith("https://i"):
            link_posting_author = "Unknown author type"
        # print(create_time.text)
        try:
            main_body = driver.find_element(By.CSS_SELECTOR, "#zw_body")
        except:
            try:
                main_body = driver.find_element(By.CSS_SELECTOR, "#newscontent > div.newstext")
            except:
                Log(f"some thing wrong with {posting_url}")
        return posting_main_info(User(posting_author, link_posting_author), posting_title.text ,create_time.text, main_body.text)
    elif posting_type == "caifuhao":
        try:
            posting_title = driver.find_element(By.CSS_SELECTOR, "#main > div.grid_wrapper > div.grid > div.g_content > div.article.page-article > div.article-head > h1")
            try:
                posting_author = driver.find_element(By.CSS_SELECTOR, "#authorwrap > a")
            except Exception:
                # 来自 类型的财富号
                posting_author = driver.find_element(By.CSS_SELECTOR, "#main > div.grid_wrapper > div.grid > div.g_content > div.article.page-article > div.article-head > div.article-meta > span.item > a")
            link_posting_author = posting_author.get_attribute('href')
            link_posting_author = urljoin(BASE_URL, link_posting_author)
            create_time = driver.find_element(By.CSS_SELECTOR, "#main > div.grid_wrapper > div.grid > div.g_content > div.article.page-article > div.article-head > div.article-meta > span:nth-child(2)")
            posting_author = posting_author.text
            if not link_posting_author.startswith("https://i"):
                link_posting_author = "Unknown author type"
            main_body = driver.find_element(By.CSS_SELECTOR, "#main > div.grid_wrapper > div.grid > div.g_content > div.article.page-article > div.article-body")
            return posting_main_info(User(posting_author, link_posting_author), posting_title.text ,create_time.text, main_body.text)
        except Exception as ex:
            print(f"some thing wrong with {posting_url}\n {ex}")
            Log(f"some thing wrong with {posting_url}\n {ex}")
    else:
        Log("invalid posting type")
        return None
        


        
def get_posting_comments(posting_url, posting_type):
    # 配置Chrome浏览器的选项
    chrome_options = Options()
    chrome_options.add_argument('--headless')  # 无界面模式，可选择是否使用
    chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
    # 启动Chrome浏览器驱动
    driver = webdriver.Chrome(service=Service(chrome_driver_path), options=chrome_options)
    # 打开网页
    driver.get(posting_url)
    driver.implicitly_wait(5)
    if posting_type == "news":
        comment_list = []
        try:
            reply_list = driver.find_element(By.CSS_SELECTOR, "#allReplyList > div.replylist_content").get_attribute("innerHTML")
        except:
            Log("no reply")
            return comment_list, 1, False
        # print(reply_list)
        reply_soup = BeautifulSoup(reply_list, "html.parser")
        replys = reply_soup.find_all("li", attrs={
            "class" : "reply_item cl"
        })
        # 在评论的第几页
        try:
            page = driver.find_element(By.CSS_SELECTOR, "#allReplyList > div.pager > ul > li.on").text
        except:
            page = 1
        # print(f"on page {page}")
        # 是否还有下一页
        try:
            next_btn = driver.find_element(By.CSS_SELECTOR, "#allReplyList > div.pager > ul > li:nth-child(8) > a")
            has_next = True
        except:
            has_next = False
        # print(f"has_next {has_next}")
        for reply in replys:
            floor_owner = reply.find("div", {
                "class":"item_reuser"
            }).text
            
            link_floor_owner = reply.find("div", {
                "class":"item_reuser"
            }).a['href']
            link_floor_owner = urljoin(BASE_URL, link_floor_owner)
            
            publish_time = reply.find("div", {
                "class":"publishtime"
            }).span.text
            
            reply_title = reply.find("div", {
                "class":"reply_title"
            }).span.text

            son_reply = reply.find("ul", {
                "class":"replyListL2"
            })
            all_son_replys = []
            if son_reply:
                list_son_reply = son_reply.find_all("li", {
                    "class" : "reply_item_l2"
                })
                for son_reply in list_son_reply:
                    son_user = son_reply.find("div", {
                        "class" : "reuser_l2_nick fl"
                    })
                    nick_name, link_user = son_user.text, son_user.a['href']
                    link_user = urljoin("https:", link_user)
                    son_reply_title = son_reply.find("div", {
                        "class" : "reply_title"
                    }).span.text
                    son_publish_time = son_reply.find("div", {
                        "class", "bottom_left fl"
                    }).span.text
                    son_comment = comment_info(User(nick_name, link_user), son_publish_time, son_reply_title, [])
                    all_son_replys.append(son_comment)
            # 记录评论区
            comment = comment_info(User(floor_owner, link_floor_owner), publish_time, reply_title, all_son_replys)
            comment_list.append(comment)    
        # posting = posting_info(User(posting_author, link_posting_author), posting_title.text ,create_time.text, main_body.text, comment_list)
        return comment_list, page, has_next
    elif posting_type == "caifuhao":
        comment_list = []
        try:
            driver.find_element(By.CSS_SELECTOR, "#replyList")
        except Exception:
            Log("no reply")
            return comment_list, 1, False
        # 找到button
        comment_link = driver.find_element(By.CSS_SELECTOR, "#comment_all > div.bottom_btns.clearfix > a").get_attribute('href')
        if comment_link.startswith("https://guba"):
            return get_posting_comments(comment_link, "news")
        else:
            return None



def get_posting_detail(posting_url, posting_type):
    # 配置Chrome浏览器的选项
    chrome_options = Options()
    chrome_options.add_argument('--headless')  # 无界面模式，可选择是否使用
    chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
    # 启动Chrome浏览器驱动
    driver = webdriver.Chrome(service=Service(chrome_driver_path), options=chrome_options)
    # 打开网页
    driver.get(posting_url)
    driver.implicitly_wait(5)
    if posting_type == "news":
        comment_list = []
        posting_title = driver.find_element(By.CSS_SELECTOR, "#newscontent > div:nth-child(2) > div.newstitle")
        create_time = driver.find_element(By.CSS_SELECTOR, "#newscontent > div:nth-child(2) > div.newsauthor > div > div.time")
        link_posting_author = driver.find_element(By.CSS_SELECTOR, "#newscontent > div:nth-child(2) > div.newsauthor > div > a.name").get_attribute('href')
        posting_author = driver.find_element(By.CSS_SELECTOR, "#newscontent > div:nth-child(2) > div.newsauthor > div > a.name").text
        link_posting_author = urljoin(BASE_URL, link_posting_author)
        if not link_posting_author.startswith("https://i"):
            link_posting_author = "Unknown author type"
        # print(create_time.text)
        main_body = driver.find_element(By.CSS_SELECTOR, "#zw_body")
        # print(main_body.text)
        reply_list = driver.find_element(By.CSS_SELECTOR, "#allReplyList > div.replylist_content").get_attribute("innerHTML")
        # print(reply_list)
        reply_soup = BeautifulSoup(reply_list, "html.parser")
        replys = reply_soup.find_all("li", attrs={
            "class" : "reply_item cl"
        })
        # 在评论的第几页
        page = driver.find_element(By.CSS_SELECTOR, "#allReplyList > div.pager > ul > li.on").text
        print(f"on page {page}")
        # 是否还有下一页
        try:
            next_btn = driver.find_element(By.CSS_SELECTOR, "#allReplyList > div.pager > ul > li:nth-child(8) > a")
            has_next = True
        except:
            has_next = False
        print(f"has_next {has_next}")
        for reply in replys:
            floor_owner = reply.find("div", {
                "class":"item_reuser"
            }).text
            
            link_floor_owner = reply.find("div", {
                "class":"item_reuser"
            }).a['href']
            link_floor_owner = urljoin(BASE_URL, link_floor_owner)
            
            publish_time = reply.find("div", {
                "class":"publishtime"
            }).span.text
            
            reply_title = reply.find("div", {
                "class":"reply_title"
            }).span.text

            son_reply = reply.find("ul", {
                "class":"replyListL2"
            })
            all_son_replys = []
            if son_reply:
                list_son_reply = son_reply.find_all("li", {
                    "class" : "reply_item_l2"
                })
                for son_reply in list_son_reply:
                    son_user = son_reply.find("div", {
                        "class" : "reuser_l2_nick fl"
                    })
                    nick_name, link_user = son_user.text, son_user.a['href']
                    link_user = urljoin("https:", link_user)
                    son_reply_title = son_reply.find("div", {
                        "class" : "reply_title"
                    }).span.text
                    son_publish_time = son_reply.find("div", {
                        "class", "bottom_left fl"
                    }).span.text
                    son_comment = comment_info(User(nick_name, link_user), son_publish_time, son_reply_title, [])
                    all_son_replys.append(son_comment)
            # 记录评论区
            comment = comment_info(User(floor_owner, link_floor_owner), publish_time, reply_title, all_son_replys)
            comment_list.append(comment)    
        posting = posting_info(User(posting_author, link_posting_author), posting_title.text ,create_time.text, main_body.text, comment_list)
        return posting, page, has_next
    
# posting = get_posting_detail("https://guba.eastmoney.com/news,000977,1325409452.html", "news")
# posting.to_json(f"{posting.title}.json")

def Log(info):
    now = time.asctime()
    with open("Log.log", 'a', encoding="utf-8") as f:
        try:
            log = f"[{now}]: {info}\n"
        except:
            log = "Wrong Type For Log\n"
        f.writelines(log)

def find_csv_files(directory):
    csv_files = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith('.csv'):
                csv_files.append(os.path.abspath(os.path.join(root, file)))
    return csv_files

def get_status(stock_id):
    with open(STATUS_INFO_PATH, 'r', encoding="utf-8") as f:
        try:
            cur_info = json.load(f)
        except json.decoder.JSONDecodeError:
            cur_info = {}
        if stock_id in cur_info:
            return cur_info[stock_id]
        else:
            return {}

def update_status(stock_id, page, status):
    with open(STATUS_INFO_PATH, 'r', encoding="utf-8") as f:
        try:
            cur_info = json.load(f)
        except json.decoder.JSONDecodeError:
            cur_info = {}
    tmp_info = {stock_id:[str(page), status]}
    cur_info.update(tmp_info)
    with open(STATUS_INFO_PATH, 'w', encoding="utf-8") as f:
        json.dump(cur_info, f)

if __name__ == "__main__":
    test_url = "https://caifuhao.eastmoney.com/news/20230712212719184098560?from=guba&name=5Lq65rCR572R5ZCn&gubaurl=aHR0cDovL2d1YmEuZWFzdG1vbmV5LmNvbS9saXN0LDYwMzAwMC5odG1s"
    info = get_posting_comments(test_url, "caifuhao")
    print(info)

def check_date(url):
    res = None
    if url.startswith('http://guba') or url.startswith('https://guba'):
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.content,'html.parser')
        meta = soup.select('#newscontent > div:nth-child(2) > div.newsauthor')[0]
        create_time = meta.find(name='div', attrs={
            'class':'time'
        })
        res = create_time.contents[0]
        return res[:4]
    else:
        # 配置Chrome浏览器的选项
        chrome_options = Options()
        chrome_options.add_argument('--headless')  # 无界面模式，可选择是否使用
        chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
        # 启动Chrome浏览器驱动
        driver = webdriver.Chrome(service=Service(chrome_driver_path), options=chrome_options)
        # 打开网页
        driver.get(url)
        driver.implicitly_wait(5)
        create_time = driver.find_element(By.CSS_SELECTOR, '#main > div.grid_wrapper > div.grid > div.g_content > div.article.page-article > div.article-head > div.article-meta > span:nth-child(2)')
        return create_time.text[:4]

if __name__ == "__main__":
    chrome_options = Options()
    chrome_options.add_argument('--headless')  # 无界面模式，可选择是否使用
    chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
    # 启动Chrome浏览器驱动
    driver = webdriver.Chrome(service=Service(chrome_driver_path), options=chrome_options)