# @Time    : 2021-04-06 11:03
# @Author  : Jackcongx
# @FileName: bbs_crawler_1.py
# @Software: PyCharm
"""消防资源网-消防头条的爬虫：http://www.1190119.com/"""
import datetime
import os
import random
import re
import sys
import uuid

import django
import parsel
import requests

# Running the crawler standalone in production: put the Django project on the
# path and bootstrap Django so the ORM models below become importable.
sys.path.insert(0, "/onecity/ayc_bbs")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ayc_bbs.settings')
django.setup()

# Project-local imports — these must come AFTER django.setup(), otherwise the
# model modules fail to load (apps not yet configured).
from forum.models import Topic
from user_app.models import AccountInfo
from ayc_bbs import settings
from utils.oss_utils import RunOSS

# Mobile-phone numbers of the bot accounts used for posting; main() picks one
# at random per scraped article so the posts look like they come from
# different users.  Each number must exist as an AccountInfo row.
ph = [
    "17912341111",
    "17912341112",
    "17912341113",
    "17912341114",
    "17912341115",
    "17912341116",
    "17912341117",
    "17912341118",
    "17912341119",
    "17912341121",
    "17912341122",
    "17912341123",
    "17912341124",
    "17912341125",
    "17912341126",
    "17912341127",
    "17912341128",
    "17912341129",
    "17912341131",
    "17912341132",
    "17912345678"
]

# ph = [
#     "13212789782",
#     "15727156182",
#     "18672238676"
# ]

# f = open('C:\\Users\\Administrator\\Desktop\\消防新闻.csv', mode='a', encoding='utf-8-sig', newline='')
# csv_writer = csv.DictWriter(f, fieldnames=['新闻标题', '新闻链接', '新闻内容', '图片链接'])
# csv_writer.writeheader()

# Desktop-Chrome User-Agent sent with every request so the site serves the
# crawler normal pages instead of blocking it as a bot.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
}


def get_html(html_url, timeout=30):
    """Fetch ``html_url`` with a browser User-Agent and return the response.

    :param html_url: absolute URL to fetch.
    :param timeout: seconds before aborting the request.  Without a timeout
        ``requests`` would block forever on a stalled connection, hanging the
        whole crawl; 30s is generous for this site.
    :return: the raw :class:`requests.Response` (HTTP status is not checked —
        callers read ``.text`` regardless).
    """
    response = requests.get(url=html_url, headers=headers, timeout=timeout)
    return response


def get_pars(html_data):
    """Wrap raw HTML text in a :class:`parsel.Selector` for CSS querying."""
    return parsel.Selector(text=html_data)


def main(url, page, title_list):
    """Crawl one listing page of fire-safety news and queue new forum topics.

    Fetches the listing at ``url``, skips articles whose title is already in
    ``title_list``, downloads each remaining article, cleans its HTML (strips
    links back to the source site, removes underline styling and the
    ``crossorigin`` attribute, absolutizes image paths), uploads the images to
    OSS and swaps in the OSS URLs, then appends an *unsaved* ``Topic`` to the
    module-level ``data_list`` for later ``bulk_create``.

    :param url: listing-page URL, e.g. ``https://tt.1190119.com/?&page=1``
    :param page: page number — used only for progress logging.
    :param title_list: titles already stored in the database (deduplication).
    """
    html_data = get_html(url).text
    selector = get_pars(html_data)
    lis = selector.css('.boxcon li').getall()
    j = 1  # 1-based counter of successfully scraped articles on this page
    for li in lis:
        li_selector = get_pars(li)
        title = li_selector.css('a::text').get()  # news title
        href = li_selector.css('a::attr(href)').get()  # relative detail link
        # Skip malformed items (missing title/link) and already-stored news;
        # a None title/href would otherwise raise TypeError on concatenation.
        if not title or not href or title in title_list:
            continue
        detail_url = "https://tt.1190119.com/" + href  # news detail link
        content_data = get_html(detail_url).text
        content_selector = get_pars(content_data)
        contents = content_selector.css('.b_con').get()  # article HTML body
        if not contents:
            continue

        # Post each article under a randomly chosen crawler account.
        account = AccountInfo.objects.get(mobile_phone=random.choice(ph))

        # Strip hyperlinks pointing back to the source site's sections.
        contents = re.sub(
            r'href="(https://v.1190119.com/.+?\.html)"|href="(https://b.1190119.com/+?)"|href="(https://b.1190119.com/.+?\.htm)"|href="(http://gf.1190119.cn/.+?\.htm)"|href="(https://gf.1190119.com/.+?\.htm)"',
            '', contents)
        # These are literal substitutions, so plain str.replace is both safer
        # and faster than re.sub:
        # remove underline styling left over from the stripped links,
        contents = contents.replace(
            'text-decoration-line: underline', 'text-decoration-line: none')
        # drop the attribute that breaks image display,
        contents = contents.replace('crossorigin="anonymous"', '')
        # and absolutize relative image upload paths.
        contents = contents.replace('/w/uploads/', 'https://b.1190119.com/w/uploads/')

        # Collect all image URLs from the cleaned article body.
        img_selector = get_pars(contents)
        images = img_selector.css('.b_con img::attr(src)').getall()

        # Upload each image to OSS under a unique per-day object name.
        images_dict = {}  # original URL -> OSS URL
        for src in images:
            now = datetime.datetime.now()
            nonce = str(uuid.uuid4())
            random_name = now.strftime("%Y-%m-%d") + "/" + nonce
            filename = '{}.jpg'.format(random_name)
            oss = RunOSS(filename=filename, dirname=settings.OSS_UPLOAD_VOUCHER_DIR)
            code, ret = oss.img_url_io(src)
            if code:
                images_dict[src] = ret

        # Swap original image URLs for their OSS copies.  Literal string
        # replacement: the previous re.sub(k, ...) treated the URL as a regex
        # pattern, which misbehaves on '.', '?' in URLs and on '\' in the
        # replacement string.
        for old_url, oss_url in images_dict.items():
            contents = contents.replace(old_url, oss_url)

        dit = {
            'subject': title,
            'content': contents,
            'account_info': account
        }
        print("第{}页，第{}条新闻>>>>>>爬取成功".format(page, j))
        data_list.append(Topic(**dit))
        j += 1


if __name__ == '__main__':
    print("开始-----")
    # Titles already posted by any of the crawler accounts — used by main()
    # to skip news that is already in the database.
    crawler_accounts = AccountInfo.objects.filter(mobile_phone__in=ph)
    existing = Topic.objects.filter(account_info__in=crawler_accounts)
    title_list = [row['subject'] for row in existing.values('subject')]

    # main() appends unsaved Topic instances here; written in one bulk insert
    # at the end.  Name must stay `data_list` — main() references it globally.
    data_list = []
    for page in range(1, 2):
        print(page)
        # Listing pages look like https://tt.1190119.com/?&page=2
        main('https://tt.1190119.com/?&page={}'.format(page), page, title_list)

    print("爬取帖子总数为：{}".format(len(data_list)))
    print("开始写入数据库-------------")
    Topic.objects.bulk_create(data_list)
    print("数据库写入完成！")
