"""
爬取指定关键字的微博的内容，转发量，评论数，点赞数，发布时间，发布者，发布平台，发布者粉丝数，发布者关注数，发布者微博数，
发布者地区，发布者转评赞，发布者简介，发布者主页标签，发布者名称，发布者id
"""

import requests
import json
from lxml import etree
import os
import re
from DrissionPage import WebPage
import time
import shutil
from datetime import datetime, timedelta
from Storage import Storage


class WeiboSpider:
    """Scrape Weibo keyword-search results: post content, forward/comment/like
    counts, publish time and platform, plus each author's profile details
    (gender, follower/following counts, post count, location, bio, verified
    tag, ...), and persist everything through Storage.
    """

    def __init__(self):
        # Cookies live in a sibling file so rewrite_cookies() can refresh
        # them without code changes.
        with open('Cookies.txt', 'r') as file:
            cookies = file.read()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
            'Cookie': cookies,
            'Referer': 'https://weibo.com/',
            'Origin': 'https://weibo.com',
            'Client-Version': 'v2.45.45',
            'X-Xsrf-Token': 'Yll-HpIKFdZuWETfpc9mR-Aq',
        }

    def date_format(self, date_str_start, date_str_end):
        """Return every date from date_str_start to date_str_end (inclusive)
        as 'YYYY-MM-DD' strings.

        :param date_str_start: first day, 'YYYY-MM-DD'
        :param date_str_end: last day, 'YYYY-MM-DD'
        :return: list of date strings, one per day
        """
        start_date = datetime.strptime(date_str_start, "%Y-%m-%d")
        end_date = datetime.strptime(date_str_end, "%Y-%m-%d")
        date_list = []
        current_date = start_date
        while current_date <= end_date:
            date_list.append(current_date.strftime("%Y-%m-%d"))
            current_date += timedelta(days=1)
        return date_list

    def rewrite_cookies(self):
        """Open a real browser for a manual login, then write the fresh
        cookies to Cookies.txt and reload self.headers."""
        page = WebPage()
        page.get("https://weibo.com/login.php")
        time.sleep(20)  # time window for manual login / captcha
        cookies_dict = page.cookies(as_dict=True)
        cookies = ';'.join(f'{key}={value}' for key, value in cookies_dict.items())
        with open('Cookies.txt', 'w') as file:
            file.write(cookies)
        self.__init__()  # re-read the cookie file into self.headers
        page.close()

    def get_response(self, url, key_table_name, type='Text'):
        """
        :param url: URL to request
        :param key_table_name: unused here; kept for interface compatibility
        :param type: return form - 'Text' (default), 'Html', 'Content' or 'Json'
        :return: the response in the requested form
        :raises ValueError: on an unrecognized type (the original looped forever)

        Retries forever, refreshing cookies whenever Weibo redirects to the
        login page or returns non-JSON where JSON was expected.
        """
        # Rest during the 01:00-06:00 window; sleep only until 06:00 instead
        # of the original fixed 6 hours (which overshot well into the day).
        now = datetime.now()
        if '01:00:00' <= now.strftime("%H:%M:%S") <= '06:00:00':
            print(f'''现在时间是凌晨，休息一下！''')
            wake_up = now.replace(hour=6, minute=0, second=0, microsecond=0)
            time.sleep(max((wake_up - now).total_seconds(), 0))

        while True:
            response = requests.get(url, headers=self.headers)
            # Weibo answers with a JS redirect to the login page once the
            # session expires - refresh cookies and retry.
            if 'window.location.href = "https://weibo.com/login.php"' in response.text:
                self.rewrite_cookies()
                print(f'''手工更新cookies成功！''')
                continue
            if type == 'Text':
                return response.text
            if type == 'Html':
                return etree.HTML(response.text)
            if type == 'Content':
                return response.content
            if type == 'Json':
                try:
                    return response.json()
                except ValueError:  # body is not JSON: session likely invalid
                    self.rewrite_cookies()
                    print(f'''手工更新cookies成功！''')
                    continue
            raise ValueError(f'unsupported response type: {type}')

    def deal_int(self, type, response):
        """Pull the forward/comment/like counters out of a search-result page.

        :param type: 'forwarding_num', 'comment_num' or 'like_num'
        :param response: lxml element tree of a search-result page
        :return: list of ints; blank nodes are skipped and non-numeric
            labels (e.g. the bare '转发' text) count as 0
        """
        xpath_map = {
            'forwarding_num': "//a[@action-type='feed_list_forward']/text()",
            'comment_num': "//a[@action-type='feed_list_comment']/text()",
            'like_num': "//div[@class='card-act']//a[@action-type='feed_list_like']//text()",
        }
        result_list = []
        for raw in response.xpath(xpath_map.get(type, '')):
            text = raw.strip()
            if not text:
                continue
            try:
                result_list.append(int(text))
            except ValueError:
                result_list.append(0)
        return result_list

    def get_weibo_data(self, keyword, key_table_name, start_time, end_time):
        """
        Crawl every post matching keyword between start_time and end_time,
        one hour slot at a time, and store post data and author-profile
        data via Storage.

        :param keyword: search keyword
        :param key_table_name: table name used by Storage for persistence
        :param start_time: first day to search, 'YYYY-MM-DD', inclusive
        :param end_time: last day to search, 'YYYY-MM-DD', inclusive
        """
        Storage().create_table(key_table_name)  # destination table

        date_list = self.date_format(start_time, end_time)
        print(f''' 搜索时间段为{start_time}至{end_time}: {date_list}''')

        # One search URL per hour slot. The 23:00-24:00 slot ends at hour 0
        # of the NEXT day (the original range(23) silently dropped that hour).
        content_url = []
        for date in date_list:
            next_day = (datetime.strptime(date, "%Y-%m-%d") + timedelta(days=1)).strftime("%Y-%m-%d")
            for hour in range(24):
                end_date, end_hour = (next_day, 0) if hour == 23 else (date, hour + 1)
                content_url.append(
                    f'''https://s.weibo.com/weibo?q={keyword}&typeall=1&suball=1&timescope=custom%3A{date}-{hour}%3A{end_date}-{end_hour}&Refer=g''')

        for url in content_url:
            time.sleep(2)  # throttle between hour slots
            response = self.get_response(url=url, key_table_name=key_table_name, type='Html')
            # Skip slots with no results at all.
            is_empty = ''.join(response.xpath("//div[@class='card-wrap']//text()"))
            if '未找到相关结果' in is_empty:
                print(
                    f'''该时间段没有对应的帖子！ {'到'.join(list(map(lambda x: x.replace('3A', ''), url.split('&')[-2].split('%')[1:])))}''')
                continue

            # Pagination links for this slot; a single-page result has no
            # s-scroll element, so fall back to the slot URL itself.
            page_list = list(map(lambda x: 'https://s.weibo.com' + x, response.xpath("//ul[@class='s-scroll']//a/@href")))
            if len(page_list) == 0:
                page_list = [url]
            print(f'''{'到'.join(list(map(lambda x: x.replace('3A', ''), url.split('&')[-2].split('%')[1:])))}一共{len(page_list)}页''')

            for page_url in page_list:
                time.sleep(2)
                # NOTE(review): every field below is scraped as an independent
                # list; this assumes all XPath queries return aligned,
                # equal-length lists per page - verify against the page layout.
                data_content, data_user = {}, {}
                response = self.get_response(url=page_url, key_table_name=key_table_name, type='Html')
                data_content['search_key'] = keyword

                # Time of collection.
                data_content['update_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

                # Publish times: normalise '年/月/日' to '-' and append seconds.
                # Weibo omits the year for current-year posts, so prepend the
                # current year (the original hard-coded '2024-').
                this_year = datetime.now().year
                release_time_list = []
                release_time_list_tmp = list(map(lambda x: x.strip(), response.xpath("//div[@class='from']/a[1]/text()")))
                for i in release_time_list_tmp:
                    temp = i.replace('年', '-').replace('月', '-').replace('日', '') + ':00'
                    if len(temp.split('-')) == 2:
                        temp = f'{this_year}-' + temp
                    release_time_list.append(temp)
                data_content['release_time'] = release_time_list

                # Post authors.
                author_list = response.xpath("//div[@style='padding: 6px 0 3px;']/a/text()")
                data_content['author'] = author_list
                data_user['author'] = author_list

                # Publishing platform: second <a> inside the 'from' div,
                # missing for some posts.
                platform_list = []
                for node in response.xpath("//div[@class='from']"):
                    links = node.xpath("./a/text()")
                    try:
                        platform_list.append(links[1])
                    except IndexError:
                        platform_list.append('')
                data_content['platform'] = platform_list

                # Post text, queried once per distinct author on the page.
                traveled_author = []
                content_list = []
                for k in author_list:
                    if k in traveled_author:
                        continue
                    traveled_author.append(k)
                    xpath = f'''//p[@class='txt' and @nick-name='{k}'][last()]'''
                    for node in response.xpath(xpath):
                        s = ''.join(list(map(lambda x: x.strip().replace('\u200b', ''), node.xpath("text()"))))
                        content_list.append(s)
                data_content['content'] = content_list

                # Forward / comment / like counts.
                data_content['forwarding_num'] = self.deal_int(type='forwarding_num', response=response)
                data_content['comment_num'] = self.deal_int(type='comment_num', response=response)
                data_content['like_num'] = self.deal_int(type='like_num', response=response)

                # Author ids, parsed from the profile links.
                author_href_list = response.xpath("//div[@style='padding: 6px 0 3px;']/a/@href")
                author_id_list = list(map(lambda x: x.split('/')[-1].split('?')[0], author_href_list))
                data_content['author_id'] = author_id_list
                data_user['author_id'] = author_id_list

                # User-record collection time.
                data_user['update_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

                # Profile-info endpoints, one per author id.
                user_info_url = ['https://weibo.com/ajax/profile/info?custom=' + i for i in author_id_list]

                gender_list, fans_num_list, follow_num_list, weibo_total_num_list, \
                total_num_list, location_list, description_list, tags_list = [], [], [], [], [], [], [], []
                for use_page in user_info_url:
                    time.sleep(2)
                    while True:
                        user_response = self.get_response(url=use_page, key_table_name=key_table_name, type='Json')
                        try:
                            user_detail = user_response['data']['user']
                            break
                        except KeyError:
                            # Endpoint refused us: refresh cookies, then back
                            # off 5 minutes before retrying (the original
                            # message claimed 10 minutes while sleeping 300 s,
                            # and slept before refreshing).
                            print(f'''获取不到数据，重刷cookie后停止5分钟，现在时间：{datetime.now().strftime("%m-%d %H:%M:%S")}''')
                            self.rewrite_cookies()
                            time.sleep(300)
                            continue

                    # Gender, follower/following counts, post count, total
                    # forward+comment+like count, location, bio and tag.
                    gender_list.append('男' if user_detail['gender'] == 'm' else '女')
                    fans_num_list.append(int(user_detail['followers_count']))
                    follow_num_list.append(int(user_detail['friends_count']))
                    weibo_total_num_list.append(int(user_detail['statuses_count']))
                    try:
                        total_num_list.append(int(user_detail['status_total_counter']['total_cnt'].replace(',', '')))
                    except KeyError:
                        total_num_list.append(0)
                    location_list.append(user_detail['location'])
                    description_list.append(user_detail['description'])
                    tags_list.append(user_detail.get('verified_reason', ''))
                data_user['gender'] = gender_list
                data_user['fans_num'] = fans_num_list
                data_user['follow_num'] = follow_num_list
                data_user['weibo_total_num'] = weibo_total_num_list
                data_user['total_num'] = total_num_list
                data_user['location'] = location_list
                data_user['description'] = description_list
                data_user['tags'] = tags_list
                print(f'''data_content:{data_content}''')
                print(f'''data_user:{data_user}''')

                # Persist both records.
                storage = Storage()
                storage.insert_data(data_content=data_content, data_user=data_user, key_table_name=key_table_name)
                print(
                    f'''{'到'.join(list(map(lambda x: x.replace('3A', ''), url.split('&')[-2].split('%')[1:])))}爬取完毕!...''')


if __name__ == '__main__':
    # Script entry point: crawl a fixed keyword over a fixed date range.
    spider = WeiboSpider()
    spider.get_weibo_data(
        keyword='ste',
        key_table_name='PEL_STE',
        start_time='2024-06-16',
        end_time='2024-06-20',
    )



