"""
Author: Hillsas
"""
import re
import os
import csv
import time
import copy
import json
import random
import pandas
import _pickle
import requests
from lxml import html


class Requests(object):
    """Thin wrapper around ``requests`` that parses responses with lxml."""

    def __init__(self):
        # lxml's etree module, used to build HTML trees from response text.
        self.etree = html.etree
        # Baseline lowercase headers shared by every request.
        self.headers_lower = {
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/92.0.4515.159 Safari/537.36'
        }

    def get_requests(self, url, headers=None, proxies=None, params=None, coding='utf-8', max_retries=5):
        """GET *url* and return its text, parsed HTML tree, and raw bytes.

        :param url: target URL.
        :param headers: optional dict of request headers.
        :param proxies: optional ``requests``-style proxies dict.
        :param params: optional query-string params.
        :param coding: encoding forced onto the response before ``.text``.
        :param max_retries: attempts before giving up (new, defaults keep
            callers working; the original retried forever and could
            busy-loop indefinitely on a persistent failure).
        :returns: dict with ``'text'``, ``'response'`` (lxml tree), ``'content'``.
        :raises: the last caught exception once retries are exhausted.
        """
        last_error = None
        for _ in range(max_retries):
            try:
                response = requests.get(url, params=params, headers=headers, proxies=proxies, timeout=30)
                response.encoding = coding
                return {
                    'text': response.text,
                    'response': self.etree.HTML(response.text),
                    'content': response.content
                }
            except Exception as e:
                last_error = e
                print(e)
                time.sleep(1)  # brief pause instead of hammering the server
        raise last_error


class IO(object):
    """Static helpers for pickle persistence, CSV output and JSON parsing."""

    @staticmethod
    def save_pickle(data, name, mode):
        """Write *data* as one pickle record to ``<name>.pickle``.

        :param data: any picklable object.
        :param name: file path without the ``.pickle`` suffix.
        :param mode: a binary mode such as ``'wb'`` or ``'ab+'``.
        """
        # NOTE: the original passed buffering=1, which is invalid for
        # binary-mode files (line buffering is text-mode only); use default.
        with open('{0}.pickle'.format(name), mode) as write_file:
            _pickle.dump(data, write_file)

    @staticmethod
    def load_pickle(name):
        """Read every pickle record from ``<name>.pickle`` into a list.

        :param name: file path without the ``.pickle`` suffix.
        :returns: list of unpickled records, in file order.
        """
        result = []
        with open('{0}.pickle'.format(name), 'rb') as read_file:
            while True:
                try:
                    result.append(_pickle.load(read_file))
                except EOFError:
                    # End of stream: all records consumed.
                    return result

    @staticmethod
    def write_header():
        """(Re)create result.csv with the comment-export column header."""
        # newline='' is required by the csv module (prevents blank rows on
        # Windows); utf-8 keeps the Chinese headers intact.
        with open('result.csv', 'w', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            writer.writerow(['博文ID', '评论时间', '评论内容', '评论人', '评论人ID'])

    @staticmethod
    def write(uid, data):
        """Append one comment row to result.csv.

        :param uid: the blog post id the comment belongs to.
        :param data: Weibo comment JSON dict with ``created_at``,
            ``text_raw`` and a nested ``user`` object.
        """
        with open('result.csv', 'a+', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            writer.writerow([uid, data['created_at'], data['text_raw'], data['user']['name'], data['user']['id']])

    @staticmethod
    def get_json(data):
        """Parse a JSON string into Python objects."""
        return json.loads(data)


class Main(object):
    """Drives the scrape: day-by-day advanced search, then CSV export."""

    def __init__(self):
        self.requests = Requests()
        self.io = IO()

    def get_search_list(self, keyword, t, time_scope=None):
        """Scrape up to 50 result pages for *keyword*, pickling each post.

        Every parsed post is appended as one record to ``search_list.pickle``.

        :param keyword: the search term.
        :param t: search type; only ``'normal'`` builds time-scoped params.
        :param time_scope: optional ``YYYY-MM-DD`` day to restrict results to.
        :returns: ``'not_found'`` when Weibo reports no results, else ``None``.
        """
        url = 'https://s.weibo.com/weibo'
        headers = copy.deepcopy(self.requests.headers_lower)
        headers[
            'accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
        headers['referer'] = 'https://weibo.com/hot/topic'
        headers['upgrade-insecure-requests'] = '1'
        # NOTE(review): hard-coded session cookie — account-specific and
        # expires; must be refreshed before running.
        headers[
            'cookie'] = 'UOR=www.baidu.com,weibo.com,www.baidu.com; SINAGLOBAL=3280087692066.3423.1632532596672; _s_tentry=weibo.com; Apache=9376308637629.207.1645413125376; ULV=1645413125998:2:1:1:9376308637629.207.1645413125376:1640780521724; WBtopGlobal_register_version=2022041913; SSOLoginState=1650424576; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFoAR9CPssf5ph4eaCM46sh5JpX5KMhUgL.Fo24eoq7eo-7e0z2dJLoI0YLxKML1hqL122LxKML1hqL122LxKML1hqL122LxKqL1hzLBK5LxKqL1hzLBK5LxKnLBKzL12zLxKqLBK5L1K2t; SCF=AjJ8ilaegGvufduEw4dTf6Zs1EXHCeIQ-KNss9sUOBKpUTthwuxBB_K7uqUfLwCBC63Am0TZ4EMBgZGM8_aJK0I.; SUB=_2A25Pdg0iDeRhGedH6VQR8ivMyD6IHXVsAnnqrDV8PUNbmtAfLXKhkW9NUNb9RhrxorZvjUN8-TPPobl6-FevjpRh; ALF=1683206386'

        # Weibo search caps results at 50 pages.
        for i in range(1, 51):
            params = ''
            print(i)
            if not time_scope:
                params = {
                    'q': keyword,
                    'page': i,
                    'sudaref': 'weibo.com'
                }
            else:
                if t == 'normal':
                    # custom:<day>-0:<day>-23 restricts results to one day.
                    params = {
                        'q': keyword,
                        'page': i,
                        'typeall': '1',
                        'suball': '1',
                        'timescope': 'custom:{0}-0:{0}-23'.format(time_scope),
                        'Referer': 'g'
                    }

            response = self.requests.get_requests(url, headers=headers, params=params)['response']

            # Weibo renders a "sorry, no results" paragraph for empty searches.
            not_found = response.xpath("//p//text()")
            if '抱歉，未找到“{0}”相关结果。'.format(keyword) in not_found:
                return 'not_found'

            targets = response.xpath("//div[@action-type='feed_list_item']")
            for target in targets:
                try:
                    mid = target.xpath(".//@mid")[0]
                    content = target.xpath(".//p[@class='txt']//text()")
                    content = ''.join(content).strip()
                    blogger_info = target.xpath(".//div[@class='info']//a[@class='name']")[0]
                    blogger = blogger_info.text
                    blogger_id = blogger_info.xpath("./@href")[0]
                    blogger_id = re.findall(r"\d+", blogger_id)[0]
                    submit = target.xpath(".//p[@class='from']/a/text()")[0].strip()
                    cards = target.xpath(".//div[@class='card-act']//text()")
                    # Fixed offsets into the action bar's text nodes —
                    # presumably share/comment/like counts; fragile against
                    # layout changes (TODO confirm against live markup).
                    share, comment, good = cards[4], cards[6], cards[11]

                    data = {
                        'content': content,
                        'mid': mid,
                        'blogger': blogger,
                        'blogger_id': blogger_id,
                        'submit': submit,
                        'share': share,
                        'comment': comment,
                        'good': good
                    }

                    self.io.save_pickle(data, 'search_list', 'ab+')
                except IndexError:
                    # Malformed/promoted card missing an expected node — skip it.
                    continue
            time.sleep(1)  # be polite between page fetches

    def scrape_advanced_search_list(self, keyword, t):
        """Run the day-scoped search over the hard-coded date window.

        Currently covers April–May 2022 only; invalid dates (e.g. 04-31)
        are still requested and simply yield no results.
        """
        # Start fresh: drop any previous run's pickle.
        try:
            os.remove('search_list.pickle')
        except FileNotFoundError:
            pass

        for year in range(2022, 2023):
            for month in range(4, 6):
                if len(str(month)) == 1:
                    month = '0{0}'.format(month)
                for day in range(1, 32):
                    if len(str(day)) == 1:
                        day = '0{0}'.format(day)
                    search_date = '{0}-{1}-{2}'.format(year, month, day)
                    print(search_date)
                    if self.get_search_list(keyword, time_scope=search_date, t=t) == 'not_found':
                        continue

    def main(self):
        """Scrape the hard-coded keyword and export up to 2000 rows to CSV."""
        self.scrape_advanced_search_list('长沙事故', 'normal')
        data = self.io.load_pickle('search_list')
        # newline='' is required by the csv module; without it Windows
        # inserts a blank row after every record.
        with open('result.csv', 'w', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            writer.writerow(['发布时间', '博文内容',  '用户名'])
            for each in data[:2000]:
                writer.writerow([each['submit'], each['content'], each['blogger']])


if __name__ == '__main__':
    Main().main()
