# @Time    : 2021-04-08 15:22
# @Author  : Jackcongx
# @FileName: bbs_crawler_2.py
# @Software: PyCharm
"""当宁消防网的爬虫：http://www.dangning.cn/index.php/index/index/news"""
import datetime
import os
import random
import re
import sys
import time
import uuid

import django
import parsel
import requests

# 线上运行爬虫脚本，添加项目路径
sys.path.insert(0, "/onecity/ayc_bbs")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ayc_bbs.settings')
django.setup()

from forum.models import Topic
from user_app.models import AccountInfo
from ayc_bbs import settings
from utils.oss_utils import RunOSS

# Phone numbers of the crawler's posting accounts.  main() picks one at
# random per article; each number must already exist as an AccountInfo row
# (AccountInfo.objects.get() will raise DoesNotExist otherwise).
ph = [
    "17912341111",
    "17912341112",
    "17912341113",
    "17912341114",
    "17912341115",
    "17912341116",
    "17912341117",
    "17912341118",
    "17912341119",
    "17912341121",
    "17912341122",
    "17912341123",
    "17912341124",
    "17912341125",
    "17912341126",
    "17912341127",
    "17912341128",
    "17912341129",
    "17912341131",
    "17912341132",
    "17912345678"
]

# Alternative (real) account list used during local testing:
# ph = [
#     "13212789782",
#     "15727156182",
#     "18672238676"
# ]

# CSV export path used before the Django/DB integration:
# f = open('C:\\Users\\Administrator\\Desktop\\消防新闻.csv', mode='a', encoding='utf-8-sig', newline='')
# csv_writer = csv.DictWriter(f, fieldnames=['新闻标题', '新闻链接', '新闻内容', '图片链接'])
# csv_writer.writeheader()

# Browser-like User-Agent so the target site serves normal pages instead of
# rejecting the scripted client.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
}


def strTimeProp(start, end, prop, frmt):
    """Return a Unix timestamp lying ``prop`` of the way from *start* to *end*.

    Both endpoints are local-time strings parsed with format *frmt*; the
    interpolated timestamp is truncated to an int.
    """
    begin_ts = time.mktime(time.strptime(start, frmt))
    finish_ts = time.mktime(time.strptime(end, frmt))
    return int(begin_ts + (finish_ts - begin_ts) * prop)


def randomDate(start, end, frmt='%Y-%m-%d %H:%M:%S'):
    """Return a time string drawn uniformly at random between *start* and *end*."""
    moment = strTimeProp(start, end, random.random(), frmt)
    return time.strftime(frmt, time.localtime(moment))


def get_html(html_url, timeout=30):
    """GET *html_url* with the module's browser User-Agent and return the Response.

    ``timeout`` (seconds, default 30) is new but backward-compatible: the
    original call had no timeout, so a stalled server could hang the whole
    crawl indefinitely.
    """
    return requests.get(url=html_url, headers=headers, timeout=timeout)


def get_pars(html_data):
    """Wrap raw HTML text in a parsel Selector for CSS/XPath querying."""
    return parsel.Selector(html_data)


def main(url, title_list):
    """Crawl the news list page at *url*, fetch up to five new articles,
    mirror their images to OSS, and append unsaved ``Topic`` instances to the
    module-global ``data_list`` (bulk-inserted by the ``__main__`` block).

    Articles whose title appears in *title_list* are skipped so repeated runs
    do not create duplicate posts.
    """
    html_data = get_html(url).text
    selector = get_pars(html_data)
    lis = selector.css('.newsRight .newsInfo').getall()
    j = 1
    for li in lis:
        # Only take the first five fresh articles per run.  The original
        # used `continue` here, which kept iterating (doing nothing) over
        # the remainder of the list; `break` stops immediately.
        if j > 5:
            break
        li_selector = get_pars(li)
        title = li_selector.css('.info .title::text').get()  # news title

        # Skip missing titles and articles already stored in the DB.
        if not title or title in title_list:
            continue
        detail_url = "http://dangning.cn" + li_selector.css('a::attr(href)').get()
        content_data = get_html(detail_url).text
        content_selector = get_pars(content_data)

        contents = content_selector.css('#contentWrap').get()  # article body HTML
        if contents is None:
            # Layout changed or empty article page — the original would have
            # crashed passing None to re.sub; just skip it.
            continue

        # Post as a randomly chosen crawler account.
        account = AccountInfo.objects.get(mobile_phone=random.choice(ph))
        images_dict = {}
        # Strip the site's empty spacer paragraphs (plain string match, no
        # regex metacharacters involved).
        contents = contents.replace(
            '<p><span style="font-family: 微软雅黑,Microsoft YaHei; font-size: 16px;"> </span></p>', '')
        # Drop the fixed container width so the content reflows in our pages.
        contents = contents.replace('width:1140px;', '')
        # Turn relative image paths into absolute URLs.
        contents = contents.replace('/ueditor/php/upload/image',
                                    'http://dangning.cn/ueditor/php/upload/image')
        img_selector = get_pars(contents)
        images = img_selector.css('img::attr(src)').getall()  # image URLs
        # Mirror each image to OSS under a dated, collision-free name.
        for src in images:
            random_name = datetime.datetime.now().strftime("%Y-%m-%d") + "/" + str(uuid.uuid4())
            filename = '{}.jpg'.format(random_name)
            oss = RunOSS(filename=filename, dirname=settings.OSS_UPLOAD_VOUCHER_DIR)
            code, ret = oss.img_url_io(src)
            if code:
                images_dict[src] = ret
        # Swap original image URLs for the OSS copies.  Plain str.replace, not
        # re.sub: the original fed unescaped URLs in as regex patterns, and
        # URLs contain metacharacters ('.', '?') that made matching unreliable.
        for old_src, new_src in images_dict.items():
            contents = contents.replace(old_src, new_src)
        dit = {
            'subject': title,
            'content': contents,
            'account_info': account,
        }
        print("第{}条新闻>>>>>>爬取成功".format(j))
        data_list.append(Topic(**dit))
        j += 1


if __name__ == '__main__':
    # Collect every topic title already posted by the crawler accounts so
    # main() can skip articles crawled on a previous run.
    account_list = AccountInfo.objects.filter(mobile_phone__in=ph)
    title_list = [i['subject'] for i in Topic.objects.filter(account_info__in=account_list).values('subject')]
    # Module-global buffer that main() fills with unsaved Topic instances.
    data_list = []

    """http://www.dangning.cn/index.php/index/index/news"""
    url = 'http://www.dangning.cn/index.php/index/index/news'
    # Threaded variant kept for reference; the crawl currently runs inline.
    # main_thread = threading.Thread(target=main, args=(url, page,))
    # main_thread.start()
    main(url, title_list)
    print("爬取帖子总数为：{}".format(len(data_list)))
    print("开始写入数据库-------------")
    # Persist all collected topics in a single bulk insert.
    Topic.objects.bulk_create(data_list)
    print("数据库写入完成！")
