# Search Weibo by keyword and scrape the matching posts (s.weibo.com search results).
import copyheaders
import requests
import re
import json
import time
import random
from datetime import datetime
import datetime as dtime
import csv
from lxml import etree
from pathlib import Path

# Headers for the s.weibo.com search pages.
# NOTE: the session 'Cookie' entry is filled in at startup (see __main__).
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3870.400 QQBrowser/10.8.4405.400'
}

# Search-URL template: q={keyword}, timescope=custom:{day}-{hour}:{day}-{hour},
# page={page}. Filled via burl.format(...) in get_page().
burl = 'https://s.weibo.com/weibo?q={}&timescope=custom:{}-{}:{}-{}&Refer=g&page={}'

# Headers (including a pre-baked session Cookie) for the weibo.com AJAX
# profile-detail API, used by send_get() inside get_page().
headers_m = {
    "Cookie":'SINAGLOBAL=319438869371.1958.1697274553153; SUB=_2A25IL-flDeRhGeBG7VcS9y3Nwj-IHXVr04mtrDV8PUJbkNANLWn-kW1NRhAS3AQFPsBpuQK7CiDyMa09FiqYxRbp; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhucEK.vC._Hq3xfR4Nu4mS5NHD95Qc1hqfe0M0eK.0Ws4DqcjiCrH0Uh-71Kqp; ULV=1697624743463:4:4:3:6239890865130.7705.1697624743400:1697356090320; XSRF-TOKEN=elvzsYtk_ZxA9POk_FyWHEkz; WBPSESS=rR4peIBWUq8q06lj8iZT2YLUqmERf-7UpIuaTbHm6TFBk-BfA8vVx6NjDFAAj3D0_cq6zGV6cAvunk_5eafZ4zEDxsy418lZa9TouI2sXCgk8tQ3YGmLpOOJRtIwVjh6fTpUoBMei3qlQHpO5cjdHg==',
    "Accept":'application/json, text/plain, */*',
    "Referer":'https://weibo.com/u/1735618041',
    "User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36'
}

def send_get(api, headers, params):
    """GET *api* with *params* and return the decoded JSON body.

    Retries forever: a non-200 response is printed and retried after a
    10 s pause; any exception (connect/read timeout, bad JSON, ...) is
    printed and retried after 1 s.  On success, sleeps 0.4 s as a crude
    rate limit and then returns the parsed JSON.
    """
    print(f'>>>访问：{api}')
    while True:
        try:
            response = requests.get(
                api,
                headers=headers,
                params=params,
                timeout=(4, 5),  # (connect, read) timeouts in seconds
            )
            if response.status_code != 200:
                print(response.text)
                time.sleep(10)
            else:
                time.sleep(.4)
                return response.json()
        except Exception as err:
            print(f'some error:{err}')
            time.sleep(1)

def save_csv(csvname, row, out_dir=None):
    """Append one row to ``{csvname}.csv``.

    Parameters
    ----------
    csvname : str
        File stem, without the ``.csv`` extension.
    row : sequence
        One CSV row (list of cell values).
    out_dir : str | Path, optional
        Directory to write into.  Defaults to the module-level ``output``
        directory configured in ``__main__`` (backward compatible with the
        original global-only behavior).
    """
    print(row)
    base = output if out_dir is None else Path(out_dir)
    save_path = base / f'{csvname}.csv'
    # utf-8-sig adds a BOM so Excel opens the file correctly;
    # newline='' is required by the csv module on Windows.
    with open(save_path, 'a', encoding='utf-8-sig', newline='') as f:
        csv.writer(f).writerow(row)


def get_page(word, day):
    """Scrape Weibo search results for *word* on *day* ('YYYY-MM-DD').

    Walks 24 one-hour windows and up to 50 result pages per window.
    Each post card yields one CSV row (appended via save_csv to
    ``博文_{word}2.csv``).  A window is abandoned early when a page has
    no result cards or Weibo falls back to its "posts you might be
    interested in" page.

    Fixes over the original: raw-string regex (the old pattern used the
    invalid escape ``\/``), and guards against missing regex match,
    missing nick-name attribute, and a None ``data`` payload from the
    profile API — all of which previously raised and killed the crawl.
    """
    for ihour in range(0, 24):
        for page in range(1, 51):
            url = burl.format(word, day, ihour, day, ihour + 1, page)
            print(url)
            time.sleep(random.uniform(0, 0.05))
            r = requests.get(url, headers=headers)
            html = etree.HTML(r.text)
            divls = html.xpath('//div[@action-type="feed_list_item"]')
            pnum = len(divls)

            # No cards (or the "interesting posts" fallback) => this hour
            # window is exhausted; move on to the next hour.
            if pnum == 0 or '以下是您可能感兴趣的微博' in r.text:
                print(f'当前时间暂无数据：以下是您可能感兴趣的微博：{day} - {ihour}')
                break

            print(f'日期：{day}', f"小时：{ihour}", f"页码：{page}", f"页码数：{pnum}")

            for div in divls:
                mid = "".join(div.xpath("./@mid"))
                user = div.xpath('.//div[@class="info"]/div[2]/a/@href')

                username = userid = ip_location = ""
                if user:
                    userlink = user[0]
                    # Raw string: the original 'com\/(\d+)\?' relied on an
                    # invalid escape sequence; also guard the [0] lookup.
                    uid_match = re.findall(r'com/(\d+)\?', userlink)
                    userid = uid_match[0] if uid_match else ""
                    nick = div.xpath('.//div[@class="info"]/div[2]/a/@nick-name')
                    username = nick[0] if nick else ""

                    if userid:
                        api = f'https://weibo.com/ajax/profile/detail?uid={userid}'
                        # "data" can be missing or None on API errors.
                        detail = send_get(api, headers_m, {}).get("data") or {}
                        ip_location = detail.get("ip_location", "")

                # Prefer the expanded full text; fall back to the short form.
                content_ = "".join(div.xpath(".//p[@node-type='feed_list_content_full']//text()")).strip()
                if content_ == "":
                    content_ = "".join(div.xpath(".//p[@node-type='feed_list_content']//text()")).strip()

                time_ = div.xpath('.//div[@class="content"]/div[@class="from"]/a/text()')
                time_ = time_[0].strip() if time_ else ""

                # card-act list items: li[1]=reposts(转发), li[2]=comments(评论), li[3]=likes(赞)
                attid = "".join(div.xpath(".//div[@class='card-act']/ul/li[3]//text()")).strip().replace("赞", "")
                coms = "".join(div.xpath(".//div[@class='card-act']/ul/li[2]//text()")).strip().replace("评论", "")
                ret = "".join(div.xpath(".//div[@class='card-act']/ul/li[1]//text()")).strip().replace("转发", "")
                row = [username, time_, content_, userid, mid, ret, coms, attid, ip_location]
                save_csv('博文_{}2'.format(word), row)


def get_day(word, day):
    """Crawl every post matching *word* on *day* — thin wrapper over get_page()."""
    return get_page(word, day)


if __name__ == '__main__':
    # Prompt for the inclusive date range to crawl (format: YYYY-MM-DD).
    print("请输入你想爬取的初始日期")
    print("格式为：2020-06-08")
    print('：>>>')
    start_day = input().strip()
    print("请输入你想爬取的结束日期：>>>")
    print("格式为：2020-12-31")
    print('：>>>')
    end_day = input().strip()

    words = ['想吃甜品']
    # Session cookie for the search pages (headers_m carries its own).
    headers[
        'Cookie'] = 'SINAGLOBAL=319438869371.1958.1697274553153; SUB=_2A25IL-flDeRhGeBG7VcS9y3Nwj-IHXVr04mtrDV8PUJbkNANLWn-kW1NRhAS3AQFPsBpuQK7CiDyMa09FiqYxRbp; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhucEK.vC._Hq3xfR4Nu4mS5NHD95Qc1hqfe0M0eK.0Ws4DqcjiCrH0Uh-71Kqp; _s_tentry=weibo.com; Apache=6239890865130.7705.1697624743400; ULV=1697624743463:4:4:3:6239890865130.7705.1697624743400:1697356090320'
    output = Path('weibo_files').absolute()
    output.mkdir(exist_ok=True)

    # Parse both bounds up front so a malformed date fails immediately,
    # before any network traffic.
    start = datetime.strptime(start_day, '%Y-%m-%d')
    endday = datetime.strptime(end_day, '%Y-%m-%d')

    for word in words:
        # Write the CSV header row (file is opened in append mode, matching
        # the append behavior of save_csv).
        save_path = output / f'博文_{word}2.csv'
        with open(save_path, 'a', encoding='utf-8-sig', newline='') as f:
            csv.writer(f).writerow(
                ['username', 'time_', 'content_', 'userid', 'mid', 'ret', 'coms', 'attid', 'ip_location'])

        # Bug fix: the original checked the end bound only AFTER crawling,
        # so it scraped start_day even when start_day > end_day.  An empty
        # range now crawls nothing.
        current = start
        while current <= endday:
            print('---------------------------')
            print('now:', datetime.now())
            get_day(word, current.strftime('%Y-%m-%d'))
            current += dtime.timedelta(days=1)
        print('now:', datetime.now())
