# @Time    : 2021-04-13 17:46
# @Author  : Jackcongx
# @FileName: bbs_crawler_3.py
# @Software: PyCharm
"""消防百事通网站的爬虫：https://new.fire114.cn/news/index"""
import datetime
import os
import random
import re
import sys
import time
import uuid

import django
import parsel
import requests
from selenium.webdriver.common.keys import Keys

# Running the crawler as a standalone script in production: put the Django
# project root on the import path and bootstrap Django so the ORM models
# imported below are usable outside a manage.py context.
sys.path.insert(0, "/onecity/ayc_bbs")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ayc_bbs.settings')
django.setup()

# Project imports must come after django.setup(), otherwise model loading fails.
from forum.models import Topic
from user_app.models import AccountInfo
from ayc_bbs import settings
from utils.oss_utils import RunOSS

# Desktop Chrome User-Agent so the site serves its normal HTML pages.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
}

# Mobile-phone numbers of the crawler accounts; each scraped article is posted
# under a randomly chosen account from this list (see main()).
ph = [
    "17912341111",
    "17912341112",
    "17912341113",
    "17912341114",
    "17912341115",
    "17912341116",
    "17912341117",
    "17912341118",
    "17912341119",
    "17912341121",
    "17912341122",
    "17912341123",
    "17912341124",
    "17912341125",
    "17912341126",
    "17912341127",
    "17912341128",
    "17912341129",
    "17912341131",
    "17912341132",
    "17912345678"
]

# NOTE(review): alternate phone list, presumably for local testing — kept for reference.
# ph = [
#     "13212789782",
#     "15727156182",
#     "18672238676"
# ]

from selenium import webdriver


def get_html(html_url, timeout=30):
    """Download a page and return the raw ``requests.Response``.

    :param html_url: absolute URL of the page to fetch.
    :param timeout: seconds before giving up on the request. The original
        call had no timeout, so a stalled server could hang the crawler
        indefinitely; 30s keeps the old success-path behavior while bounding
        the wait.
    :return: the ``requests.Response`` object (status is not checked here;
        callers read ``.text`` directly).
    """
    response = requests.get(url=html_url, headers=headers, timeout=timeout)
    return response


def get_pars(html_data):
    """Wrap raw HTML text in a parsel Selector for CSS/XPath querying."""
    return parsel.Selector(html_data)


def main():
    """Crawl news articles from fire114.cn and bulk-insert them as forum topics.

    Flow: load the news index with Selenium, fetch each article's detail page
    with requests, strip links and boilerplate from the body, re-host the
    images on OSS, then attribute each new topic to a random account from
    ``ph``. Articles whose title already exists for those accounts are skipped.
    Writes all topics to the database in one bulk_create at the end.
    """
    # Collect existing topic titles for the crawler accounts so already-posted
    # articles can be filtered out.
    account_list = AccountInfo.objects.filter(mobile_phone__in=ph)
    title_list = [i['subject'] for i in Topic.objects.filter(account_info__in=account_list).values('subject')]
    data_list = []

    # Launch Chrome and open the target news index page.
    browser = webdriver.Chrome()
    try:
        browser.get('https://new.fire114.cn/news/index')

        # Detail-page links of all articles listed on the index page.
        link_elements = browser.find_elements_by_css_selector('.media-title a:link')
        count = 1
        for link in link_elements:
            detail_url = link.get_attribute('href')
            # Fetch and parse the article detail page.
            detail_selector = get_pars(get_html(detail_url).text)
            title = detail_selector.css('.ls_mid_title::text').get()
            print(title)
            # .get() returns None when the selector matches nothing; skip such
            # pages instead of crashing on the membership test / re.sub below.
            if title is None:
                continue
            # Skip articles that were already posted by our accounts.
            if title in title_list:
                print("帖子：{}已存在,过滤----".format(title))
                continue
            contents = detail_selector.css('.ls_mid_content').get()  # article body HTML
            if contents is None:
                # No body found on this page; nothing to post.
                continue

            # Strip all hyperlink targets from the article body.
            contents = re.sub(r'href="(.*?)"', '', contents)

            # Remove the site's boilerplate disclaimer appended to each article.
            contents = re.sub(
                r'<div style="border-top:1px dashed #bbb;font-size:12px;color:#999;">声明：该文观点仅代表作者本人，消防百事通系信息发布平台，消防百事通仅提供信息存储空间服务。</div>',
                '',
                contents)

            # Normalize image URLs to absolute form. The two-step substitution
            # (absolute -> relative, then relative -> absolute) avoids
            # double-prefixing URLs that were already absolute.
            contents = re.sub(r'https://new.fire114.cn/uploads/kindeditor/image', '/uploads/kindeditor/image', contents)
            contents = re.sub(r'/uploads/kindeditor/image', 'https://new.fire114.cn/uploads/kindeditor/image', contents)
            contents = re.sub(r'https://new.fire114.cn/ueditor/php/upload/image', '/ueditor/php/upload/image', contents)
            contents = re.sub(r'/ueditor/php/upload/image', 'https://new.fire114.cn/ueditor/php/upload/image', contents)

            # Upload every image referenced in the body to OSS and remember the
            # mapping original URL -> OSS URL.
            images = get_pars(contents).css('img::attr(src)').getall()
            images_dict = {}
            for img_url in images:  # renamed from 'i': it shadowed the outer loop variable
                now = datetime.datetime.now()
                nonce = str(uuid.uuid4())
                filename = '{}.jpg'.format(now.strftime("%Y-%m-%d") + "/" + nonce)
                oss = RunOSS(filename=filename, dirname=settings.OSS_UPLOAD_VOUCHER_DIR)
                try:
                    code, ret = oss.img_url_io(img_url)
                    if code:
                        images_dict[img_url] = ret
                except Exception as e:
                    # Best-effort: keep the original image URL on failure, but
                    # surface the error instead of swallowing it silently.
                    print("图片上传失败：{} -> {}".format(img_url, e))

            # Rewrite image links in the body to their OSS URLs. Plain string
            # replacement is used on purpose: URLs contain regex metacharacters
            # ('.', '?') that re.sub would misinterpret as patterns.
            for original_url, oss_url in images_dict.items():
                contents = contents.replace(original_url, oss_url)

            # Attribute the topic to a randomly chosen crawler account.
            account = AccountInfo.objects.get(mobile_phone=random.choice(ph))
            data_list.append(Topic(subject=title, content=contents, account_info=account))
            print("第{}条新闻>>>>>>爬取成功".format(count))
            count += 1
            print('--------------------------')
        time.sleep(5)
    finally:
        # Always release the browser, even if crawling fails midway.
        browser.close()

    print("爬取帖子总数为：{}".format(len(data_list)))
    print("开始写入数据库-------------")
    # Persist all new topics in a single bulk insert.
    Topic.objects.bulk_create(data_list)
    print("数据库写入完成！")


# Entry point when executed as a standalone script (e.g. from cron).
if __name__ == '__main__':
    main()
