# -*- coding:utf-8 -*-
# @time: 2022/4/2 22:00
# @Author: 天才小猪猪
# @Environment: Python 3.9
# @file: weibo_spider.py

import json
import csv
from operator import index
from pydoc import describe
import re
from webbrowser import get
import requests
import time
import random
import pandas as pd
import datetime
import redis

from redis import Redis,StrictRedis
import hashlib
# Redis connection used as a deduplication set: sha256(user_id) digests are
# SADDed to the 'weibo_user' set in __main__ so already-crawled users are
# skipped across restarts.
# NOTE(review): host/password are hard-coded — move to config or env vars.
conn = Redis(host='172.23.130.37', port=6379,password="65297122he")
import sqlalchemy
# MySQL engine shared by every pandas read_sql / to_sql call in this file.
engine = sqlalchemy.create_engine('mysql+pymysql://hsw:65297122he@172.23.130.37:3306/scrapy')
#engine = sqlalchemy.create_engine('mysql+pymysql://root:jack@47.93.238.194:3306/weibo')
# Scrape one page of free HTTP proxies; get_html() picks one at random for
# each request. The last table on the page holds the ip/port rows.
url = "http://www.66ip.cn/areaindex_11/1.html"
data = pd.read_html(url, header=None)[-1]
data.columns = ["ip", "port", "1", "2", "3"]
# NOTE(review): `property` shadows the builtin, but get_html() references it
# under this name, so the name is kept. Row 0 is skipped (presumably an
# in-table header repeat — confirm against the live page), hence the i + 1.
property = ["http://" + data["ip"][i + 1] + ":" + data["port"][i + 1]
            for i in range(len(data) - 1)]



# 获取网页源码的文本文件
# Fetch the page source text for a Weibo ajax URL.
def get_html(url, user_id):
    """Fetch *url* with spoofed browser headers/cookies via a random proxy.

    Retries recursively when the server answers "414 Request-URI Too Large"
    (long pause) or when the body is not valid JSON (short random pause) —
    every caller expects a JSON string back.

    :param url: Weibo ajax endpoint to request.
    :param user_id: numeric Weibo uid, interpolated into the Referer header.
    :return: the response body as text (expected to be JSON).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:90.0) Gecko/20100101 Firefox/90.0",
        # "User-Agent": random.choice(USER_AGENTS),
        # BUG FIX: the original format string had no {} placeholder, so
        # .format(user_id) silently dropped the uid and every request sent
        # the same bare Referer.
        "Referer": "https://weibo.com/u/{0}".format(user_id),
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "cache-control": "max-age=0",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "x-xsrf-token": "aliZ2wYK_Y_oEKGRcYM9tOJh",  # must be refreshed manually
        "x-requested-with": "XMLHttpRequest",
        "sec-ch-ua": '" Not A;Brand";v="99", "Chromium";v="98", "Microsoft Edge";v="98"',
        "sec-fetch-mode": "navigate"
    }
    cookie = {
        # must be refreshed manually (session cookie)
        'Cookie':'SINAGLOBAL=8302648825032.237.1636007534307; ULV=1650447227340:9:2:1:9976528651052.51.1650447227338:1649905744828; UOR=,,www.techweb.com.cn; SUB=_2A25PW6VUDeRhGeNJ6VIS-S7EzDWIHXVsEJGcrDV8PUNbmtAfLWjWkW9NS_5D5ZewZjHXqIw4X_OQnm2QWf4FLLg6; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhmxpO6_7Bbx4r5qcvFEF1M5JpX5KzhUgL.Fo-Neo501K5RS0.2dJLoI0qLxK-LBo5L12qLxK-LBo2L1h2LxK-LB-BLBK.LxKML1-2L1hBLxK-L1K5L12eLxKqLBK2L12et; login_sid_t=821fd84b0bdcfd6ac319a74fd8270505; cross_origin_proto=SSL; PC_TOKEN=5ed7ffbcf8; WBStorage=4d96c54e|undefined; _s_tentry=passport.weibo.com; wb_view_log=1920*10801; Apache=9976528651052.51.1650447227338; ALF=1681983618; SSOLoginState=1650447619; XSRF-TOKEN=u1lH4YL9G6GRJWHy-7yOauf1; WBPSESS=G9Ff9BQjWfN2Zb8Vj7-gc-yXFs3Y9sWM72U5flc0bOD_g3vusEwwgCVT1Zq8J4AHtyRQtKwyQ8ZeFytfBXqGKlcNt3C2aJU9uAOTZO-elWvRIkZZBaMviMWmwIMOFLrRy4k320KHll0ueqcttAK5LQ=='
    }
    response = requests.get(url, headers=headers, cookies=cookie,
                            proxies={'http': random.choice(property)})
    response.encoding = "utf-8"  # response.apparent_encoding
    responses = response.text
    if "414 Request-URI Too Large" in responses:
        # Rate-limited: back off for a long time, then retry the same URL.
        print("暂停200s：", url)
        time.sleep(200)
        return get_html(url, user_id)
    try:
        json.loads(responses)
    except Exception as e:
        print("读取失败:", e)
        print("暂停3秒")
        print(responses)
        time.sleep(random.randint(1, 10))
        # BUG FIX: the original discarded the retried response and returned
        # the broken one; return the retry's result instead.
        return get_html(url, user_id)
    return responses

def get_user_id(user_id, page=1, results=None, since_id=""):
    """Collect all posts of one user by paging through the mymblog endpoint.

    Recurses with the ``since_id`` continuation cursor returned by each page,
    stopping after 30 pages, on an empty page, or when the cursor no longer
    contains "kp".

    :param user_id: numeric Weibo uid whose timeline is crawled.
    :param page: 1-based page number for the current request.
    :param results: accumulator of post dicts; a fresh list by default.
    :param since_id: continuation cursor from the previous page ("" = first).
    :return: list of post dicts collected so far.
    """
    # BUG FIX: the mutable default `results=[]` was shared across calls,
    # leaking one user's posts into the next user's result list.
    if results is None:
        results = []
    if since_id == "":
        url = "https://weibo.com/ajax/statuses/mymblog?uid={0}&page={1}&feature=0".format(user_id, page)
    else:
        url = "https://weibo.com/ajax/statuses/mymblog?uid={0}&page={1}&feature=0&since_id={2}".format(user_id, page, since_id)
    data = get_html(url, user_id)
    try:
        print("正在爬取页面:", page)
        payload = json.loads(data)
        lists = payload["data"]["list"]
        page_id = payload["data"]['since_id']
        results.extend(lists)
        # The original while/else returned on the first iteration, so this is
        # really an if: recurse while the page had posts, the cursor looks
        # valid ("kp" marker), and we are under the 30-page cap.
        if lists and ("kp" in str(page_id)) and int(page) <= 30:
            time.sleep(2)
            return get_user_id(user_id, page + 1, results, since_id=page_id)
        return results
    except Exception:
        # BUG FIX: the original bare `except: results` fell through and
        # returned None, which crashed callers iterating the result.
        return results

def get_first_comment(id, uid, max_id=-1, result_datax=None, second_result_data=None):
    """Recursively crawl the first-tier comments of one post.

    Comments with more than 5 replies also have their second-tier replies
    fetched via :func:`get_second_comments`.

    :param id: weibo post id whose comments are fetched.
    :param uid: uid of the post's author (required by the ajax endpoint).
    :param max_id: pagination cursor; -1 requests the first page.
    :param result_datax: accumulator for first-tier comment rows.
    :param second_result_data: accumulator for second-tier reply rows.
    :return: tuple ``(result_datax, second_result_data)``.
    """
    # BUG FIX: mutable default accumulators ([]) were shared across calls,
    # carrying comments from one post over into the next.
    if result_datax is None:
        result_datax = []
    if second_result_data is None:
        second_result_data = []
    if max_id == -1:
        url = "https://weibo.com/ajax/statuses/buildComments?flow=0&is_reload=1&id={0}&is_show_bulletin=2&is_mix=0&count=10&uid={1}".format(
            id, uid)
    elif max_id > 0:
        url = "https://weibo.com/ajax/statuses/buildComments?flow=0&is_reload=1&id={0}&is_show_bulletin=2&is_mix=0&max_id={1}&count=20&uid={2}".format(
            id, max_id, uid)
    try:
        data = get_html(url, user_id=uid)
        dataz = json.loads(data)
    except Exception:
        # One retry after a short pause. BUG FIX: dropped the original
        # `print(data)` here — `data` is unbound when get_html itself raised,
        # which turned the retry path into a NameError.
        # (NOTE: message says 300s but the original sleep really is 3s.)
        print("暂停300s")
        time.sleep(3)
        data = get_html(url, user_id=uid)
        dataz = json.loads(data)
    main_max_id = dataz['max_id']
    total = 0
    if len(dataz["data"]) > 0:
        for data1 in dataz["data"]:
            first_id = data1['id']                    # first-tier comment id
            first_idstr = data1["idstr"]
            first_max_id = data1["max_id"]            # cursor into its replies
            mid = data1["mid"]
            first_text_row = data1["text_raw"]        # comment text
            total_number = data1["total_number"]      # number of replies
            user_id = data1["user"]["id"]             # commenter's uid
            user_name = data1["user"]["screen_name"]  # commenter's name
            floor_number = data1["floor_number"]
            first_create_ated = data1["created_at"]
            total = total + total_number
            print("一级数据:", first_text_row, user_id, user_name, total_number, datetime.datetime.now())
            if total_number > 5:
                # Only fetch the reply thread for reasonably busy comments.
                a_data = get_second_comments(first_id, user_id, max_id=-1, first_data=[], list_a=[])
                try:
                    second_result_data.extend(a_data)
                except Exception:
                    print("a_data：", a_data)
            result_datax.append([id, uid, first_id, first_max_id, mid, first_text_row, total_number, user_id, user_name, floor_number, first_create_ated])
        # `and`, not bitwise `&`: same result on bools, but short-circuits.
        if main_max_id > 0 and total > 0:
            return get_first_comment(id, uid, main_max_id, result_datax=result_datax, second_result_data=second_result_data)
    return result_datax, second_result_data

def get_second_comments(id, user_id, max_id=-1, first_data=None, list_a=None):
    """Recursively crawl the second-tier replies of one first-tier comment.

    :param id: id of the first-tier comment whose replies are fetched.
    :param user_id: uid of that comment's author (required by the endpoint).
    :param max_id: pagination cursor; -1 requests the first page.
    :param first_data: accumulator for reply rows.
    :param list_a: pagination cursors already visited (cycle protection).
    :return: ``first_data`` with all collected reply rows appended.
    """
    # BUG FIX: mutable default arguments ([]) were shared across calls.
    if first_data is None:
        first_data = []
    if list_a is None:
        list_a = []
    if max_id == -1:
        url = "https://weibo.com/ajax/statuses/buildComments?is_reload=1&id={0}&is_show_bulletin=2&is_mix=1&fetch_level=1&max_id=0&count=20&uid={1}".format(
            id, user_id)
    elif max_id > 0:
        url = "https://weibo.com/ajax/statuses/buildComments?is_reload=1&id={0}&is_show_bulletin=2&is_mix=1&fetch_level=1&max_id={1}&count=20&uid={2}".format(id, max_id, user_id)
    print(url)
    try:
        data = get_html(url, user_id)
        dataz = json.loads(data)
    except Exception:
        # single immediate retry
        data = get_html(url, user_id)
        dataz = json.loads(data)

    second_max_id = int(dataz["max_id"])
    datas = dataz["data"]

    if datas is not None:
        for z_data in datas:
            second_id = z_data["id"]                    # reply id
            second_created_at = z_data["created_at"]    # reply timestamp
            second_mid = z_data["mid"]
            second_rootid = z_data["rootid"]            # root comment id
            second_reply_text_raw = z_data["text_raw"]  # reply text
            reply_user_id = z_data["user"]["id"]        # replier's uid
            reply_user_name = z_data["user"]["screen_name"]
            print("二级评论：", second_reply_text_raw, second_id, reply_user_id, reply_user_name, datetime.datetime.now())
            first_data.append(
                [id, user_id, second_id, second_created_at, second_mid, second_rootid, second_reply_text_raw, reply_user_id,
                 reply_user_name])
        # BUG FIX: the original appended second_max_id to list_a BEFORE this
        # membership test, so "second_max_id not in list_a" was always False
        # and the function never paged past the first batch of replies.
        # Test first, then record the cursor so cycles still terminate.
        if second_max_id != 0 and second_max_id not in list_a:
            list_a.append(second_max_id)
            return get_second_comments(id, user_id, max_id=second_max_id, first_data=first_data, list_a=list_a)
    return first_data

def spyder_user(user_id):
    """Crawl every post of one user and persist posts and comments to MySQL.

    Posts go to ``weibo_main_user``; first-/second-tier comments go to
    ``weibo_first_data_user`` / ``weibo_second_data_user`` via the shared
    module-level ``engine``.

    :param user_id: numeric Weibo uid to crawl.
    """
    results = get_user_id(user_id=user_id, page=1, results=[])
    for result in results:
        main_created_ated = result["created_at"]
        post_id = result["id"]  # renamed local: `id` shadowed the builtin
        text_raw = result["text_raw"]
        # Presumably the same uid as the argument, taken from the post
        # payload — confirm; the original rebinds the parameter here too.
        user_id = result["user"]["id"]
        mid = result["mid"]
        user_name = result["user"]["screen_name"]        # author name
        common_counts = result["comments_count"]         # comment count
        attitudes_count = result["attitudes_count"]      # like count
        reposts_count = result["reposts_count"]          # repost count
        data1 = pd.DataFrame(
            [[post_id, user_id, user_name, text_raw, common_counts, attitudes_count, reposts_count, main_created_ated, user_id]],
            columns=["id", "user_id", "user_name", "text_raw", "comments_count", "attitudes_count", "reposts_count", "main_created_ated", "topic"])
        data1.to_sql("weibo_main_user", con=engine, if_exists="append", index=False)
        if common_counts > 1:
            result_datax, second_result_data = get_first_comment(post_id, uid=user_id, max_id=-1, result_datax=[], second_result_data=[])
            result_datax = pd.DataFrame(result_datax, columns=["id", "uid", "first_id", "first_max_id", "mid", "first_text_row", "total_number", "user_id", "user_name", "floor_number", "first_create_ated"])
            second_columns = ["id", "user_id", "second_id", "second_created_at", "second_mid", "second_rootid", "second_reply_text_raw", "reply_user_id", 'reply_user_name']
            if len(second_result_data) > 0:
                second_result_data = pd.DataFrame(second_result_data, columns=second_columns)
            else:
                second_result_data = pd.DataFrame(columns=second_columns)
            try:
                result_datax.to_sql("weibo_first_data_user", con=engine, if_exists="append", index=False)
                if len(second_result_data) > 0:
                    second_result_data.to_sql("weibo_second_data_user", con=engine, if_exists="append", index=False)
            except Exception as e:
                # Best-effort: log the failed frames and move on to the next post.
                print("error", e)
                print("errer:", result_datax)
                print("errer:", second_result_data)
        else:
            print("only one data", text_raw)


def get_user(sql):
    """Run *sql* against the module-level MySQL engine and return a DataFrame."""
    return pd.read_sql(sql, engine)

if __name__ == '__main__':
    # Select users seen in weibo_main that are not yet fully crawled
    # (weibo_main_user) or not yet marked as visited (weibo_had_user).
    sql = """SELECT distinct(user_id) as user FROM scrapy.weibo_main 
            where user_id not in (select distinct(user_id) as user_id from scrapy.weibo_main_user)
            or user_id not in (select user_id from scrapy.weibo_had_user)
         """
    data = pd.read_sql(sql, engine)
    for user_id in data["user"][11000:]:
        time.sleep(random.randint(1, 2))
        print("正在爬取user_id:", user_id)
        # Redis SADD returns 1 only the first time a member is added, so the
        # sha256 digest of the uid acts as a restart-safe "already crawled"
        # marker.
        idx = hashlib.sha256(str(user_id).encode()).hexdigest()
        ex = conn.sadd('weibo_user', idx)
        # Both branches recorded the uid in weibo_had_user; do it once here.
        pd.DataFrame([[user_id]], columns=["user_id"]).to_sql(
            "weibo_had_user", con=engine, if_exists="append", index=False)
        if ex == 1:
            print("进入爬取 ")
            try:
                spyder_user(user_id)
            except Exception as R:
                # Keep the crawl running; log and skip this user.
                print("出错:", R)
                continue
        else:
            print("已经爬取", user_id)
