# _*_coding:utf-8 _*_
# @Time    :2020/11/27 19:29
# @Author  :lyc
# @Email   :lyc0209@qq.com
# @FileName:weibo_spider.py

import requests
import json
from mysql import *
from send_email import *
import re
import time


def load_uid(path='user.json'):
    """
    Load the monitored user's uid from a JSON config file.

    The file must contain a top-level object with a ``"uid"`` key.

    :param path: path to the JSON config file; defaults to ``'user.json'``
                 in the current working directory (backward compatible).
    :return: the uid value stored under ``"uid"``
    :raises FileNotFoundError: if the file does not exist
    :raises KeyError: if the file has no ``"uid"`` key
    """
    with open(path, encoding='utf-8') as f:
        return json.load(f)["uid"]


class SpiderWeiBo:
    """
    Scrape one Weibo user's posts through the m.weibo.cn mobile JSON API,
    store them (and any attached photos) via the project's ``Mysql`` wrapper,
    and send an e-mail for each newly discovered post.
    """

    def __init__(self):
        self.uid = load_uid()  # uid of the account to monitor, from user.json
        self.index_url = 'https://m.weibo.cn/api/container/getIndex?type=uid&value=' + self.uid  # profile (home page) API url
        self.content_url = self.index_url + "&containerid="  # post-list url; the containerid is appended by get_container_id()
        # Mobile browser User-Agent so the m.weibo.cn endpoints serve the mobile JSON pages
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Linux; U; Android 8.1.0; zh-cn; BLA-AL00 Build/HUAWEIBLA-AL00) "
                          "AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/8.9 "
                          "Mobile Safari/537.36 "
        }
        self.weibo_nums = 0  # user's total post count; filled in by get_weibo_nums()

    def start(self):
        """
        Entry point: resolve the container id, then either back-fill every
        post (first run, empty table) or fetch only the newest posts.
        :return: None
        """
        self.get_container_id()

        mysql = Mysql()
        sql_select = """select count(*) from weibo"""  # row count decides first-run (full crawl) vs. incremental crawl
        counts = mysql.selectDbOne(sql_select)
        mysql.closeDb()
        if int(counts) == 0:
            print("第一次执行，所有微博入库")
            self.get_all_weibo_contents()
        else:
            print("第n次执行，新微博入库")
            self.get_new_weibo()

    def get_container_id(self):
        """
        Fetch the containerId used to jump from the user's home page to the
        post list, and append it to ``self.content_url``.
        :return: None
        """
        res = requests.get(url=self.index_url, headers=self.headers).content.decode('utf-8')
        # NOTE(review): assumes tabs[1] of the profile page is always the posts tab — confirm
        container_id = json.loads(res).get('data')["tabsInfo"]["tabs"][1]["containerid"]
        self.content_url += container_id

    def get_weibo_nums(self):
        """
        Fetch the user's total post count into ``self.weibo_nums``.
        :return: None (result stored on the instance, despite the method name)
        """
        res = requests.get(url=self.index_url, headers=self.headers).content.decode('utf-8')
        self.weibo_nums = json.loads(res).get('data')["userInfo"]["statuses_count"]

    def insert_weibo_to_mysql(self, card):
        """
        Extract the post details from one ``card`` JSON object and insert them
        (plus any attached photos) into the database.
        :param card: one element of the API response's ``data.cards`` list
        :return: None
        """
        # url: post detail page, text: post body, create_date: creation time
        url = card.get('scheme')
        # TODO: if this is a repost, recursively fetch the original post
        text = card.get("mblog")["raw_text"]
        # create_date = card.get("mblog")["created_at"]   # date; current-year posts omit the year, so the detail page is scraped for the full timestamp instead
        # pull the full "created_at" timestamp out of the detail page with a regex
        res_detail = requests.get(url=url, headers=self.headers).content.decode('utf-8')
        value = '"created_at": "(.*?)"'
        create_date = re.findall(value, str(res_detail))[0]
        mysql = Mysql()
        # NOTE(review): values are interpolated straight into the SQL string —
        # a post text containing a quote breaks the statement, and this is
        # SQL-injectable; switch to parameterized queries if the Mysql wrapper
        # supports them.
        sql_insert = "INSERT INTO weibo(uid, create_date, url, text) VALUES ('%s', '%s',  '%s',  '%s')" % (
            self.uid, create_date, url, text)
        mysql.insertDB(sql_insert)
        if int(card.get("mblog")["pic_num"]) > 0:  # the post has pictures: store them too
            sql_select = """select id from weibo order by id DESC limit 1"""  # the row just inserted is the last one; its id is the photos' foreign key
            weibo_id = mysql.selectDbOne(sql_select)
            for pic in card.get("mblog")["pics"]:
                photo_url = pic["large"]["url"]
                sql_insert = "INSERT INTO photo(weibo_id, url) VALUES ('%s', '%s')" % (weibo_id, photo_url)
                mysql.insertDB(sql_insert)

        # TODO: fetch comment data as well
        mysql.closeDb()

    def get_new_weibo(self):
        """
        Fetch the user's latest posts (first API page only) and store the ones
        not already present in the database, e-mailing each new one.
        :return: None
        """
        mysql = Mysql()
        select = """select url from weibo"""  # urls of every post already stored
        urls = mysql.selectDbAll(select)
        mysql.closeDb()

        url = self.content_url + "&page=1"
        res = requests.get(url=url, headers=self.headers).content.decode('utf-8')
        cards = json.loads(res).get('data').get('cards')
        # iterate oldest-first so inserts keep chronological order
        for card in cards[::-1]:
            # NOTE(review): assumes selectDbAll returns a flat collection of url
            # strings; if it returns row tuples this membership test never
            # matches — verify against the Mysql wrapper.
            if card.get('scheme') in urls:  # this post is already stored: skip to the next one
                continue
            self.insert_weibo_to_mysql(card)
            # e-mail the text of the newly stored post
            send_email(card.get("mblog")["raw_text"])

    def get_all_weibo_contents(self):
        """
        Crawl every post of the user, paging backwards from the last page so
        posts are inserted oldest-first.
        :return: None
        """
        self.get_weibo_nums()
        i = int(self.weibo_nums / 10) + 1  # total page count (ten posts per page)
        while i > 0:  # walk the pages from last to first
            url = self.content_url + "&page=" + str(i)
            res = requests.get(url=url, headers=self.headers).content.decode('utf-8')
            cards = json.loads(res).get('data').get('cards')
            i -= 1
            if len(cards) == 0:
                continue
            for card in cards[::-1]:
                self.insert_weibo_to_mysql(card)
            print("第" + str(i + 1) + "页入库成功")
            time.sleep(3)   # wait 3 seconds to avoid triggering anti-crawling measures


if __name__ == '__main__':
    # Script entry point: build the spider and run the crawl.
    SpiderWeiBo().start()
