import ast
import random
import time

import pymysql
import requests

from emotion_analysis import *

# Anti-scraping measures: browser-like request headers + a rotating
# proxy-IP pool + random delays between requests.

# Example target page:
# https://piao.qunar.com/ticket/detail_9354.html#from=mps_search_suggest
finishPage = 0
allList = []  # accumulates every scraped comment row across pages
page = 1
# Load the proxy-IP pool from file. A context manager guarantees the
# handle is closed (the original left it open for the process lifetime).
with open("IP.txt", "r") as f:
    file = f.readlines()

# SECURITY NOTE(review): database credentials are hard-coded in source;
# move them to environment variables or a config file before deployment.
cnx = pymysql.connect(
    host="jzhangluo.com",
    port=1706,
    user="root",
    password="guet1314999hire",
    database="monitor"
)

# Cursor object used for all INSERT statements below.
cursor = cnx.cursor()


# 爬取给定景点ID和页码的评论的函数
# Scrape one page of comments for the given sight ID and persist them.
def comment(sightId, page):
    """Fetch one page of comments for a Qunar sight and store each row.

    Requests the comment-list JSON endpoint through a randomly chosen
    proxy, runs sentiment analysis on every comment, inserts the rows
    into the ``remark`` table and appends them to the module-level
    ``allList``.

    Args:
        sightId: Qunar sight (attraction) identifier, e.g. 9354.
        page: 1-based page number of the comment list to fetch.
    """
    # Comment-list endpoint observed in the browser's F12 network tab.
    url = "https://piao.qunar.com/ticket/detailLight/sightCommentList.json"
    # Parse the proxy pool: one dict literal per line of IP.txt.
    # BUG FIX: ast.literal_eval replaces eval() so a malicious line in the
    # proxy file cannot execute arbitrary code.
    item = [ast.literal_eval(line.strip()) for line in file]
    # Pick one proxy at random for this request.
    proxies = random.choice(item)
    # GET parameters for the comment-list endpoint.
    params = {
        "sightId": str(sightId),
        "index": str(page),
        "page": str(page),
        "pageSize": "10",
        "tagType": "0",
    }
    # Browser-like user agent so the site treats the scraper as a browser.
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36",
    }
    # Fetch the comment data.
    res = requests.get(url=url, headers=headers, params=params, proxies=proxies, timeout=5)
    # The server sometimes answers with a garbled/blocked payload whose
    # second character is "r" (presumably an error body — TODO confirm the
    # exact marker). BUG FIX: retry a bounded number of times instead of
    # looping forever on a persistently bad response.
    for _ in range(5):
        if res.text[1] != "r":
            break
        res = requests.get(url=url, headers=headers, params=params, proxies=proxies, timeout=5)

    # Original page URL stored alongside each row.
    # BUG FIX: previously built from the global loop variable ``i`` (a
    # NameError when called outside __main__) with sightId hard-coded to
    # 9354; now derived from the function parameters.
    link = (
        "https://piao.qunar.com/ticket/detailLight/sightCommentList.json"
        "?sightId=" + str(sightId) + "&index=" + str(page)
        + "&page=" + str(page) + "&pageSize=10&tagType=0"
    )
    # Parse the JSON response and pull out the "data" payload.
    results = res.json()["data"]
    for result in results["commentList"]:
        author = result["author"]          # commenter name
        publishedDate = result["date"]     # comment date
        score = result["score"]            # overall rating
        text = result["content"]           # comment body
        ip = result["cityName"]            # commenter location
        # Sentiment classification of the comment text.
        emotion = emotion_analysis(text)
        # Parameterized INSERT — safe against SQL injection.
        sql = "INSERT INTO remark (remarkName, address, time, score, link, comments, emotion, platformID) VALUES (%s, %s, %s, %s, %s,%s,%s,3)"
        values = (author, ip, publishedDate, score, link, text, emotion)
        cursor.execute(sql, values)
        allList.append([author, ip, publishedDate, score, link, text])
        # Commit each row so progress survives an interruption.
        cnx.commit()
        # Demo output.
        print(values)
    # Random 1-3 s delay to mimic human browsing and avoid rate limiting.
    time.sleep(random.uniform(1, 3))


if __name__ == "__main__":
    # Qunar only exposes the first 500 comment pages; for demonstration
    # purposes only a single page is scraped here.
    for i in range(1, 2):
        print('开始爬取第%s页' % i)
        comment(9354, i)
    # BUG FIX: close the cursor before the connection (the original
    # closed the connection first, leaving the cursor to be closed on a
    # dead connection).
    cursor.close()
    cnx.close()