import requests
import time
from datetime import datetime
from lxml import etree
from service.weibo_info import BlogInfo
from service.comment import Comment
from dao.dao import MysqlInfo
from dao.dao import DateBase


# HTTP headers attached to every request to Weibo; carries a desktop
# Edge/Chrome User-Agent string (presumably so the site serves the normal
# desktop HTML to the scraper — TODO confirm against Weibo's behavior).
WEIBO_HEADERS = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/101.0.4951.64 Safari/537.36 Edg/101.0.1210.47 "
}


def get_cookies(cookie_path: str) -> dict:
    """Parse a browser "Cookie" header string stored in *cookie_path*.

    The file is expected to contain ``name=value`` pairs separated by ``;``
    (the exact text copied from a browser's Cookie request header).

    :param cookie_path: path to the UTF-8 cookie text file.
    :return: dict mapping cookie names to their values.
    """
    cookies = dict()
    with open(cookie_path, "r", encoding="utf-8") as file:
        for segment in file.read().split(";"):
            segment = segment.strip()
            if not segment:
                continue  # tolerate a trailing ";" or blank segments
            # Split on the FIRST "=" only: cookie values (e.g. base64 in
            # Weibo's SUB cookie) may themselves contain "=" characters.
            name, _, value = segment.partition("=")
            cookies[name] = value
    return cookies


def convert_href_to_user_id(hrefs: list) -> list:
    """Extract numeric user ids from profile hrefs.

    Hrefs look like ``//weibo.com/1234567?refer_flag=...``; the id is the last
    path segment once the query string is dropped. Using the last segment
    (instead of the original hard-coded ``[12:]`` slice) also tolerates
    absolute hrefs such as ``https://weibo.com/1234567``.

    :param hrefs: list of href strings scraped from profile links.
    :return: list of user ids as ints.
    """
    user_ids = []
    for href in hrefs:
        path = href.split("?")[0]
        user_ids.append(int(path.rstrip("/").rsplit("/", 1)[-1]))
    return user_ids


def get_blog_id_from_html(xpath_data) -> list:
    """Return the ``mid`` attribute (blog id) of every result card as an int."""
    raw_ids = xpath_data.xpath("//div[@id='pl_feed_main']/div[1]//div[@class='card-wrap']/@mid")
    if not raw_ids:
        print("get_blog_id_from_html count is 0")
    return [int(raw) for raw in raw_ids]


def get_nick_name_from_html(xpath_data) -> list:
    """Return the author nickname text of every result card."""
    names = xpath_data.xpath("//div[@class='content']/div[@class='info']/div[2]/a/text()")
    if not names:
        print("get_nick_name_from_html count is 0")
    return names


def get_user_id_from_html(xpath_data) -> list:
    """Return the numeric author id of every result card, parsed from its profile link."""
    profile_links = xpath_data.xpath("//div[@class='content']/div[@class='info']/div[2]/a/@href")
    ids = convert_href_to_user_id(profile_links)
    if not ids:
        print("get_user_ids_from_html count is 0")
    return ids


def get_release_time_from_html(xpath_data, year: int = 2022) -> list:
    """Parse the release timestamp of every result card into ``datetime`` objects.

    Weibo search results show times without a year (e.g. ``03月15日 12:30``),
    so *year* is prepended before parsing. It defaults to 2022 to match the
    original scrape window, but can now be overridden instead of being
    hard-coded.

    :param xpath_data: parsed lxml tree of a search-result page.
    :param year: calendar year to assume for the displayed dates.
    :return: list of ``datetime`` objects, one per card.
    """
    raw_times = xpath_data.xpath("//div[@class='content']/p[@class='from']/a[1]/text()")
    if not raw_times:
        print("get_release_time_from_html count is 0")
    release_times = []
    for raw in raw_times:
        date_str = "%d %s" % (year, raw.strip())
        release_times.append(datetime.strptime(date_str, "%Y %m月%d日 %H:%M"))
    return release_times


def get_forwards_from_html(xpath_data) -> list:
    """Return the forward (repost) count of every result card.

    The bare label "转发" or an empty string means the card has no forwards yet,
    which is reported as 0.
    """
    counts = []
    raw_counts = xpath_data.xpath("//div[@class='card']/div[2]/ul/li[1]/a/text()")
    if not raw_counts:
        print("get_forwards_from_html count is 0")
    for raw in raw_counts:
        label = raw.strip()
        counts.append(0 if label in ("转发", "") else int(label))
    return counts


def get_comments_from_html(xpath_data) -> list:
    """Return the comment count of every result card.

    The bare label "评论" or an empty string means the card has no comments yet,
    which is reported as 0.
    """
    counts = []
    raw_counts = xpath_data.xpath("//div[@class='card']/div[2]/ul/li[2]/a/text()")
    if not raw_counts:
        print("get_comments_from_html count is 0")
    for raw in raw_counts:
        label = raw.strip()
        counts.append(0 if label in ("评论", "") else int(label))
    return counts


def get_likes_from_html(xpath_data) -> list:
    """Return the like count of every result card.

    The bare label "赞" or an empty string means the card has no likes yet,
    which is reported as 0.
    """
    counts = []
    raw_counts = xpath_data.xpath("//div[@class='card']/div[2]/ul/li[3]//span[@class='woo-like-count']/text()")
    if not raw_counts:
        print("get_likes_from_html count is 0")
    for raw in raw_counts:
        label = raw.strip()
        counts.append(0 if label in ("赞", "") else int(label))
    return counts


def get_blog_content_content_from_html(xpath_data) -> list:
    """Return the text body of every blog card.

    Each card's text is tried at ``p[3]`` first (the expanded/full-text layout)
    and falls back to ``p[2]``; all text fragments are stripped and joined into
    one string per card.
    """
    texts = []
    cards = xpath_data.xpath("//div[@class='card']/div[1]/div[2]")
    if not cards:
        print("get_blog_content_content_from_html blog_list is 0")
    for card in cards:
        fragments = card.xpath(".//p[3]/text()")
        if not fragments:
            fragments = card.xpath(".//p[2]/text()")
        texts.append("".join(fragment.strip() for fragment in fragments))
    return texts


def get_next_page_url(xpath_data) -> str:
    """Return the absolute URL of the "next page" link, or "" on the last page.

    The pager's "下一页" anchor can sit in either the first or second ``<a>``
    slot (depending on whether a "previous page" link is present), so both
    positions are checked in order.
    """
    base = "https://s.weibo.com/"
    for position in (1, 2):
        labels = xpath_data.xpath("//div[@class='m-page']/div/a[%d]/text()" % position)
        if labels and labels[0] == "下一页":
            href = xpath_data.xpath("//div[@class='m-page']/div/a[%d]/@href" % position)[0]
            return base + href
    return ""


def convert_wei_bo_info(blog_id: list, nick_name: list, user_id: list, release_time: list, forwards: list,
                        comments: list, likes: list, content: list,
                        search_content: str = "上海疫情") -> list:
    """Zip the parallel per-field lists into a list of ``BlogInfo`` objects.

    All input lists must be parallel (same length, same card order); an
    ``IndexError`` is raised if any list is shorter than *nick_name*, which
    surfaces scraping mismatches instead of silently dropping rows.

    :param search_content: the search keyword recorded on each row; previously
        hard-coded, now a keyword parameter with the same default so existing
        callers are unaffected.
    :return: list of ``BlogInfo`` instances.
    """
    info_list = []
    for index in range(len(nick_name)):
        info_list.append(
            BlogInfo(
                search_content=search_content,
                blog_id=blog_id[index],
                nick_name=nick_name[index],
                user_id=user_id[index],
                release_time=release_time[index],
                forward_count=forwards[index],
                comment_count=comments[index],
                like_count=likes[index],
                content=content[index]
            )
        )
    return info_list


class Blog:
    """Crawls Weibo search-result pages starting at *base_url* and persists
    every blog post and its comments through the DAO layer."""

    def __init__(self, cookie_path: str, base_url: str, mysql_info: MysqlInfo) -> None:
        """Load cookies from *cookie_path*, open the database, and prepare the
        comment fetcher. Crawling starts from *base_url*."""
        self.cookies = get_cookies(cookie_path)
        self.base_url = base_url
        self.dateBase = DateBase(mysql_info)
        self.comment = Comment(self.cookies)

    def _fetch_page(self, url: str):
        """Download *url* with the stored cookies and return the parsed lxml tree."""
        response = requests.get(url=url, cookies=self.cookies, headers=WEIBO_HEADERS)
        return etree.HTML(response.text)

    def collect_all_blog_info(self):
        """Walk every result page: save its posts and their comments, then follow
        the "next page" link until there is none.

        Each page is fetched exactly once and the parsed tree is reused for both
        content extraction and pagination (previously every page was downloaded
        twice: once inside get_blog_info_by_url and once more just to find the
        next-page link).
        """
        while self.base_url != "":
            xpath_data = self._fetch_page(self.base_url)

            blog_list = self._parse_blog_page(xpath_data)
            self.save_blog_list(blog_list)

            comment_list = self.get_blog_comments(blog_list)
            self.save_comment_list(comment_list)

            self.base_url = get_next_page_url(xpath_data)
            time.sleep(1)  # throttle between pages to avoid rate limiting

    def get_blog_info_by_url(self, url: str) -> list:
        """Download one search-result page and return its posts as BlogInfo objects."""
        return self._parse_blog_page(self._fetch_page(url))

    def _parse_blog_page(self, xpath_data) -> list:
        """Extract every per-card field from an already-parsed page and combine
        them into BlogInfo objects."""
        return convert_wei_bo_info(
            blog_id=get_blog_id_from_html(xpath_data),
            nick_name=get_nick_name_from_html(xpath_data),
            user_id=get_user_id_from_html(xpath_data),
            release_time=get_release_time_from_html(xpath_data),
            forwards=get_forwards_from_html(xpath_data),
            comments=get_comments_from_html(xpath_data),
            likes=get_likes_from_html(xpath_data),
            content=get_blog_content_content_from_html(xpath_data),
        )

    def get_blog_comments(self, blog_list: list) -> list:
        """Fetch and flatten the comments of every post in *blog_list*."""
        comment_list = []
        for blog in blog_list:
            print("\n\n\n", blog.content)
            comments = self.comment.get_all_comments_by_user_id_blog_id(blog.user_id, blog.blog_id)
            for c in comments:
                comment_list.append(c)

        return comment_list

    def save_blog_list(self, blog_list: list):
        """Insert each BlogInfo row into the database."""
        for blog in blog_list:
            self.dateBase.insert_blog_row(blog)

    def save_comment_list(self, comment_list: list):
        """Insert each comment row into the database."""
        for comment in comment_list:
            self.dateBase.insert_comment_row(comment)

    def close(self):
        """Release the database connection."""
        self.dateBase.close()


if __name__ == "__main__":
    # Search "上海疫情" (Shanghai epidemic), hot-sorted, 2022-03-15 .. 2022-05-15.
    url = "https://s.weibo.com/weibo?q=%E4%B8%8A%E6%B5%B7%E7%96%AB%E6%83%85&xsort=hot&suball=1&timescope=custom:2022" \
           "-03-15:2022-05-15&Refer=g "
    # NOTE(review): the original script crashed here — Blog.__init__ requires a
    # MysqlInfo third argument that was omitted, and Blog has no method named
    # get_comment_info_by_url. MysqlInfo's constructor signature is not visible
    # from this file; fill in real connection parameters before running.
    mysql_info = MysqlInfo()  # TODO: supply host/user/password/database
    weibo = Blog("./../cookie.txt", url, mysql_info)
    weibo.collect_all_blog_info()
    weibo.close()

