# -*- coding: utf-8 -*-

import requests
import json
import re
from time import sleep
from bs4 import BeautifulSoup
import time
import datetime
from requests.adapters import HTTPAdapter
from time import strftime, localtime


def baojing(wenben):
    """Send a plain-text alert message to the DingTalk webhook.

    Parameters
    ----------
    wenben : str
        The alert message body to deliver.
    """
    dingding = '钉钉报警接口'  # DingTalk webhook URL (placeholder)

    headers = {
        'Content-Type': 'application/json'
    }
    payload = {"msgtype": "text",
               "text": {
                   "content": wenben
               }}
    # A timeout is required: without one, a dead webhook would hang the
    # monitoring loop forever.
    resp = requests.post(url=dingding, headers=headers, json=payload, timeout=30)
    print(resp)

def get_bs(link):
    """Scan one forum listing page and alert on new posts with watched keywords.

    Fetches *link*, extracts every thread title/link from the listing, then
    downloads each thread page and searches it line by line for the monitored
    keywords ('力派' / '某派').  Matching threads that have not been alerted
    before trigger a DingTalk alert via ``baojing`` and are recorded in
    '已报警内容.txt' so they are only reported once.

    Parameters
    ----------
    link : str
        Absolute URL of the listing page to scan.

    Raises
    ------
    requests.RequestException
        Propagated on network failure; handled by the caller's retry loop.
    """
    headers = {
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'Accept-Encoding':'gzip, deflate',
        'Accept-Language':'zh-CN,zh;q=0.9',
        'Connection':'keep-alive',
        'Cookie':'_list_image_display=true; _account_rid=d0Y5RUFCa0ZKdHFsQ29NT0MyMEpKOXNzTk1FZDI5d3AvcHpVYm40MDRlYz0tLXNxVFlGVHk1NS8xQW8xdUFPU2lZMnc9PQ%3D%3D--6cc10cf4ad0eb148968cec773423843c461c3072; _account_rtk=VnJGZmNOSWx0cUtFdHBEbStrL3p4M0pGRG14c3ZaenFQeTV6RzBMWlFrV2tWTWZraFhuMlllclJwMklQaGJhbi0tMW1Lc3pjL094SHF0bzhHL2VVNGFRUT09--cf546308637e04fed823f3670582a4861c5643ce; _account_ssk=1; _account_uid=dad46ac9ea9d95c4c1b8a636e99b0698; _guid=973bc3154bddde5c69b21c8b642366eb; Hm_lvt_06d6102d08ca53705ff73889bb63d367=1594966360,1594967036,1594967127,1595035697; Hm_lpvt_06d6102d08ca53705ff73889bb63d367=1595037328; _account_dat=eyJ1aWQiOiIxMTkzNjQzNzAifQ==; _giabbs_ss=SzRGWVNkKzVPeVdiNnVzMW9iNXg1RWUxN0VxdGhTRWl2R1RaSTV6Z2N0cEU1eXVub0grNVBJemVqbHB6QllQWWRDclZaaCtzU3FJOE9sSnRYbiszRDN6RlY5QURQdVdCcUxqMERLMFRCbHlKMnNTZldtTTh2eGtyclluUVhRWVhYS20zYjY0ZldLRTM0Zis1ZlVaNm0yVThvU3Q4QUt1YW14bjVVbGttYkZiZmdJb1RHL2VpaWE1YlB6VENVS2U2U2h0elIxY01ZVU0rSjVya01CdC9iQT09LS1oZ0p4cTRWbXFSeEhzZjN0OG5qSWJRPT0%3D--1cee077355acb6673d566c29c7d3ab9db8db9f2a; _shared_csrf_token=MDAwMXx8NGZjZDY5MjItMTNhMC00ZGE5LTk1MmItZWJjZjBmMTdkNmQ1fHxlZGU2OGNjNzljNjE3YmMxNmIzNDlkZjRmZGFkMTZiYg==',
        'Host':'www.giabbs.com',
        'Referer':'http://www.giabbs.com/center/recent',
        'Upgrade-Insecure-Requests':'1',
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
    }
    # Context managers guarantee the session and the history file are closed
    # even when a per-thread request raises (the original leaked both).
    with requests.Session() as s:
        # Retry transient network failures up to 3 times.
        s.mount('http://', HTTPAdapter(max_retries=3))
        s.mount('https://', HTTPAdapter(max_retries=3))
        response = s.get(link, headers=headers, timeout=150)
        soup = BeautifulSoup(response.text, 'lxml')
        p = soup.find_all(class_='l-item-link l-title')

        # 'r+' lets us read the existing history and append new alerts; after
        # readlines() the file position sits at EOF, so writelines() appends.
        with open('已报警内容.txt', 'r+', encoding='utf-8') as f1:
            list1 = f1.readlines()  # one alerted URL per element, '\n' included

            print('当前新帖子主题：')
            for line in p:
                line = str(line)
                title = line.split('">')[1].split('</a>')[0].strip()
                print(title)

                if 'href=' not in line:
                    print('网页似乎有问题，请检查')
                    continue

                linkk = line.split('href="')[1].split('"')[0]
                linkkk = url1 + linkk  # url1 is the site root (module level)
                zw = s.get(linkkk, headers=headers, timeout=150)
                time.sleep(3)  # throttle so we don't hammer the site
                for aa in zw.text.split('\n'):
                    if '力派' in aa or '某派' in aa:
                        # Skip quoted replies and empty lines.
                        if '引用' in aa or aa == ' ':
                            continue
                        # Strip HTML tags from the matched line.
                        aa2 = re.sub('<(S*?)[^>]*>.*?|<.*? />', '', aa)
                        aa3 = '舆论报警：' + '\t' + aa2 + '\n'
                        # History entries carry a trailing '\n' (readlines
                        # keeps it), so compare with the newline appended.
                        lianjie = linkkk + '\n'
                        if lianjie in list1:
                            print('■■■■■■■■■■■■■已经报警过了')
                        else:
                            baojing('报警链接:' + '\t' + linkkk)
                            baojing(aa3)
                            time.sleep(10)
                            f1.writelines(linkkk + '\n')
                            # Keep the in-memory list in sync (with '\n') so
                            # the same thread isn't re-alerted this pass.
                            list1.append(linkkk + '\n')
                            print(aa3 + linkkk + '\n')


# NOTE: to reset the alert history, truncate '已报警内容.txt' manually
# (it must exist before the first run, since get_bs opens it with 'r+').


# Listing pages to scan: page 1 and page 2, each with two ajax "scroll" pages.
url = 'http://www.giabbs.com/center/recent'
url_page2 = 'http://www.giabbs.com/center/recent?_ajax_next_page=2'
url_page3 = 'http://www.giabbs.com/center/recent?_ajax_next_page=3'
url2 = 'http://www.giabbs.com/center/recent/page-2'
url2_page2 = 'http://www.giabbs.com/center/recent/page-2?_ajax_next_page=2'
url2_page3 = 'http://www.giabbs.com/center/recent/page-2?_ajax_next_page=3'

# Site root; get_bs uses this to build absolute thread URLs.
url1 = 'http://www.giabbs.com'


def main():
    """Run the monitoring loop forever: scan every page, sleep, repeat.

    A successful pass sleeps 30 minutes; any failure (network error,
    parse error) is logged and backed off for an hour before retrying.
    """
    while True:
        try:
            # get_bs returns None; the original bound its result to unused
            # page* variables, which have been dropped.
            get_bs(url)
            print('开始爬取下拉加载页2')
            get_bs(url_page2)
            print('开始爬取下拉加载页3')
            get_bs(url_page3)
            print('开始扫描第二页')
            get_bs(url2)
            print('开始爬取下拉加载页2')
            get_bs(url2_page2)
            print('开始爬取下拉加载页3')
            get_bs(url2_page3)
            print('大家坛舆论监控完毕...30分钟后进行下一轮扫描'+'\n')
            print(strftime('%Y-%m-%d %H:%M:%S', localtime()))
            time.sleep(1800)
        except Exception as e:
            print(e)
            print('该网站目前无法访问，将在一小时后重新尝试')
            time.sleep(3600)


if __name__ == '__main__':
    main()
