# -*- coding: utf-8 -*-

# Author: 一根鱼骨棒
# Date: 2020-09-23 16:17:06
# LastEditTime: 2021-01-02 09:10:49
# LastEditors: 一根鱼骨棒
# Description: 本开源代码使用GPL 3.0协议
# Software: VScode
# Copyright 2020 迷舍

from function import *

# 处理图片


def dealPicture(content, source_url='', target_url=TARGET_URL):
    '''
    Download article images and rewrite image placeholders in the content.

    @param content {list}: lines of article text
    @param source_url {str}: base URL used to resolve relative image paths
    @param target_url {str}: URL prefix for the rewritten <img> tags
    @return {dict}: {'thumb': thumbnail URL or '', 'content': new line list}
    '''
    img_pattern = r'<img.*src="(.*?)".*?'
    new_content = []
    img_list = []
    thumb = ''
    for eachline in content:
        img = re.search(img_pattern, eachline)
        if img is not None:
            # extract the src attribute value (reuse the match object
            # instead of re-running the same search as the original did)
            img_src = re.search(r'.+?src="(\S+)"', img.group())
            img_src = img_src.group().split('src=')[1].strip('"')
            if "space.gif" in img_src:
                # layout spacer image: drop the line entirely
                eachline = ""
            else:
                # re-point the <img> at the mirror location, centred
                eachline = "\n<p style='text-align:center;'><img src='" + \
                    target_url+img_src.split('/')[-1]+"' /></p>"
                img_list.append(img_src)
        new_content.append(eachline)
    #########image list output#########
    if DEBUG:
        print(img_list)
    ###################################
    for img in img_list:
        if '//' in img:
            # absolute URL: force http scheme, keep only the file name locally
            img_url = "http://"+img.split("//")[-1]
            img = img.split('/')[-1]
        else:
            # relative path: resolve against the article's directory
            img_url = source_url+img
        # BUG FIX: skip the network request entirely when the file already
        # exists or downloading is disabled (the original fetched the bytes
        # first and only then decided to throw them away)
        if os.path.exists(download_asset_path+img) or not DOWNLOAD_PIC:
            continue
        img_data = requests.get(img_url, headers=headers).content
        with open(download_asset_path+img, "wb") as target:
            target.write(img_data)
    if DOWNLOAD_PIC:
        # normalise image sizes
        resizePicture(img_list)
        # pick a thumbnail
        thumb = setThumb(img_list)
    return {'thumb': thumb, 'content': new_content}


def resizePicture(img_list):
    '''
    Clamp downloaded image widths into an acceptable range.

    Images wider than 900px are shrunk to 700px wide; images narrower than
    500px are enlarged to 500px wide.  The aspect ratio is preserved.

    @param img_list {list}: image src strings (only the file name is used)
    '''
    for each in img_list:
        name = each.split('/')[-1]
        img = cv2.imread(download_asset_path + name)
        if img is None:
            # BUG FIX: cv2.imread returns None for a missing/unreadable
            # file; the original bare-except then *returned*, aborting the
            # whole batch — skip just this image instead
            print("未读取到图片")
            continue
        height, width = img.shape[0], img.shape[1]
        scale = height / width
        if int(width) > 900:
            img = cv2.resize(img, (700, int(700 * scale)))
        if int(width) < 500:
            img = cv2.resize(img, (500, int(500 * scale)))
        cv2.imwrite(download_asset_path + name, img)
    return
# 设置缩略图


def setThumb(img_list):
    '''
    Pick a thumbnail from the downloaded images.

    Xinhua military/commentary thumbnails live on the list page, so a fresh
    one is chosen here: the first image that is roughly landscape
    (height/width <= 1.1) and at least 500px wide.

    @param img_list {list}: image src strings (only the file name is used)
    @return {url}: thumbnail URL, or "" when no image qualifies
    '''
    thumb = ""
    for each in img_list:
        name = each.split('/')[-1]
        img = cv2.imread(download_asset_path + name)
        if img is None:
            # BUG FIX: unreadable/missing file — try the next candidate
            # (the original bare-except returned immediately, even though a
            # later image might have qualified)
            continue
        width, height = img.shape[1], img.shape[0]
        # reject portrait-ish (h/w > 1.1) or too-narrow (< 500px) images
        if float('%.1f' % (height / width)) > 1.1 or int(width) < 500:
            print("图片大小不恰当")
            continue
        thumb = TARGET_URL + name
        print("设置缩略图成功")
        break
    return thumb

# 处理视频


def dealVideo(content, source_url='', target_url=TARGET_URL):
    '''
    TODO video saving, video URL, script insertion
    Download article videos and rewrite video placeholders in the content.

    @param content {list}: lines of article text
    @param source_url {str}: unused here; kept for signature symmetry
    @param target_url {str}: URL prefix for mirrored videos (NOTE: the
        body still uses the global TARGET_URL, as the original did)
    @return {dict}: {'video_list': mirrored video URLs, 'content': new lines}
    '''
    video_pattern = r'<iframe.*class="pageVideo".*src="(.*?)"'
    new_content = []
    video_count = 1
    video_list = []
    for eachline in content:
        video = re.search(video_pattern, eachline)
        if video is not None:
            # extract the src attribute (reuse the match object instead of
            # re-running the same search as the original did)
            video_src = re.search('src=(.*)(")$', video.group()).group()
            video_src = video_src.split('src=')[1].strip('"')
            # turn the player-page URL into the config URL that carries the
            # real video address
            video_src = video_src.replace("playType=0", "playType=1")
            video_src = video_src.replace("getPlayPage", "getConfigs")
            video_src = video_src.replace("&amp;", "&")
            if video_src != " " and ("http" in video_src) and DOWNLOAD_VIDEO:
                data = requests.get(video_src, headers=headers).content
                data = json.loads(data)
                # real video address from the config JSON
                video_url = data["result"]["videoInfos"]['src']
                video_title = video_url.split('/')[-1]
                video_list.append(TARGET_URL+video_title)
                # video_thumb=data["result"]["videoInfos"]['poster']
                # BUG FIX: the original built a multiprocessing.Process and
                # called .run(), which executes the target synchronously in
                # THIS process — call the downloader directly instead
                idmDownloader(IDM, video_url, download_asset_path, video_title)
            # first video replaces the iframe line; later ones get an anchor
            if video_count == 1:
                eachline = ""
            else:
                eachline = "\n<div id=video"+str(video_count)+"></div>"
            video_count = video_count+1
        new_content.append(eachline)
    return {"video_list": video_list, "content": new_content}

# 清理文章中不必要的内容


def cleanContent(content):
    '''
    Strip blank lines, HTML comments and boilerplate from the article.

    Processing stops entirely at the first terminator marker ("责任编辑"
    etc.) — everything after it is footer/related-links noise.

    @param content {list}: lines of article text
    @return {list}: cleaned lines
    '''
    # markers that signal the end of the article body
    flags = ["责任编辑", "延伸阅读", "链接：", "链接:", "相关新闻", "【纠错】", "分享到"]
    new_content = []
    for eachline in content:
        eachline = eachline.strip()
        # drop single-line HTML comments
        if eachline.startswith("<!--") and eachline.endswith("-->"):
            continue

        # # script-line cleanup disabled: it broke lines mixing images and text
        # if "//" in eachline and hasChinese(eachline):
        #     continue
        # terminator marker: the article body is over
        if any(flag in eachline for flag in flags):
            break
        # ignore the "楷体" (font) token when testing for Chinese text
        judge = eachline.replace("楷体", '')
        if hasChinese(judge) or ("img" in eachline) or ('id="video' in eachline):
            # photo-gallery and raw-mp4 lines are noise
            if ("图集" in eachline) or ("videoMP4" in eachline):
                continue
            new_content.append(eachline)
    return new_content

# 设置文章正文信息


def setContent(title, content, article_url="", target_url=TARGET_URL):
    '''
    Clean the raw article markup and extract its media.

    @param title {str}: article title (currently unused here)
    @param content {str}: raw article markup
    @param article_url {str}: URL of the article page, used to resolve
        relative image paths
    @param target_url {str}: URL prefix for mirrored assets
    @return {dict}: {'thumb': thumbnail URL, 'video_list': video URLs,
        'content': cleaned content string}
    '''
    # absolute base URL of the article's directory (for relative images)
    source_url = article_url.replace(article_url.split('/')[-1], '')
    # "&#13;" separates lines in the raw markup; split into a list
    content = content.split("&#13;")  # list
    # BUG FIX: forward target_url — the original hard-coded TARGET_URL,
    # silently ignoring this parameter
    picture_deal_result = dealPicture(content, source_url, target_url)
    content = picture_deal_result['content']
    # thumbnail
    thumb = picture_deal_result['thumb']
    # videos: download and insert placeholders
    video_deal_result = dealVideo(content, source_url, target_url)
    content = video_deal_result['content']
    # mirrored video URLs
    video_list = video_deal_result['video_list']
    # strip blank lines and comments
    content = cleanContent(content)
    # list -> single string
    content = "".join(content)
    # kept as str: json serialisation below must not receive bytes
    # result = result.encode('utf-8')
    return {'thumb': thumb, "video_list": video_list, 'content': content}


# 设置单篇文章信息


def setArticleInfo(title, cid="1", typeid="1", thumb="", video="", keywords="", remark="", content="", block="0", modelid=MODEL_ID, author="", source="新华网", addtime="", updatetime=None, **params):
    '''
    Assemble the article attributes into a JSON string.

    @param cid: column id
    @param typeid: model type
    @param block: recommendation slot
    @param modelid: model id
    @param author: publisher
    @param source: source site
    @param title: title
    @param content: article body
    @param keywords: keywords
    @param remark: summary
    @param {url} thumb: thumbnail
    @param {timestamp} addtime: creation time
    @param {timestamp} updatetime: modification time; None means "now"
    @param {dict} **params: extra attributes merged into the record
    @return {json} result: the final record as a JSON string
    '''
    # TODO set the focus image
    # BUG FIX: the original default str(int(time.time())) was evaluated
    # once at import time, stamping every article of a run with the same
    # value; compute it per call instead.  (The fourteen no-op
    # self-assignments such as `thumb = thumb` were removed.)
    if updatetime is None:
        updatetime = str(int(time.time()))
    result = {
        "cid": cid,
        "typeid": typeid,
        "block": block,
        "modelid": modelid,
        "title": title,
        "keywords": keywords,
        "remark": remark,
        "thumb": thumb,
        "author": author,
        "source": source,
        "addtime": addtime,
        "video": video,
        "updatetime": updatetime,
        "content": content,
        **params,
    }
    # keep Chinese characters unescaped in the stored JSON
    result = json.dumps(result, ensure_ascii=False)
    return result


def saveArticle(article_url="", cid="1", headers="", **params):
    '''
    Fetch one article page, clean it and return its JSON record.

    @param article_url {str}: URL of the article page
    @param cid {str}: target column id
    @param headers {dict}: HTTP headers used for the request
    @param {dict} **params: extra attributes forwarded to setArticleInfo
    @return {str|None}: JSON record, or None when skipped or failed
    '''
    if DEBUG:
        print("当前处理:"+article_url)
    try:
        html = getHtml(article_url, headers)
    except:
        # NOTE(review): swallowing this leaves `html` unbound; the next
        # try-block then raises NameError, is caught below, and the URL is
        # logged as a failure — it works, but only by accident
        pass
    # raw title, stripped of surrounding whitespace and zero-width chars
    try:
        title = html.xpath(
            '//div[@class="h-title"]/text()')[0].lstrip().rstrip().replace('\u200b', "").replace('\u2022', "")
        # work around an encoding issue: gbk codec can't encode '\xa0'
        title = title.replace(u'\xa0', u'')
        if DEBUG:
            print(title)
        # skip articles already collected (titles recorded in added.txt);
        # "a+" creates the file on first run, seek(0) rewinds for reading
        with open(log_path + "added.txt", "a+") as added_title:
            added_title.seek(0, 0)
            added = added_title.read()
            if (title+"||") in added and not DEBUG :
                return
    except:
        # print("采集失败，URL: "+article_url)
        saveFailInfo("URL:"+article_url)
        return
    # publish time -> unix timestamp string
    try:
        addtime = html.xpath(
            '//span[@class="h-time"]/text()')[0].lstrip().rstrip()
        addtime = str(
            int(time.mktime(time.strptime(addtime, "%Y-%m-%d %H:%M:%S"))))
    except:
        # fall back to "now" when the page carries no usable timestamp
        addtime = str(int(time.time()))
    # raw article body element (two page layouts are tried)
    try:
        content = html.xpath(
            '//div[@id="p-detail"]/div[@class="main-aticle"]|//div[@id="p-detail"]')[0]
    except:
        print("正文未采集到")
        return
    print("准备采集:"+article_url)
    print(title)
    # serialize the element tree to utf-8 bytes
    source_content = etree.tostring(content, encoding="utf-8")
    # cleaned content plus thumbnail/video metadata
    content_clean_result = setContent(title, source_content.decode(
        "utf-8"), article_url=article_url, target_url=TARGET_URL)
    true_content = content_clean_result['content']
    # thumbnail URL
    thumb = content_clean_result['thumb']
    # mirrored video URLs
    video_list = content_clean_result['video_list']
    # summary
    remark = setSummary(true_content)
    # keywords
    keywords = setKeywords(true_content, separator=KEYWORD_SEPARATOR)
    # remark=true_content.split('。')[0]
    # per-article record
    more_info = params
    article_data = setArticleInfo(cid=cid,
                                  title=title, content=true_content, thumb=thumb, video=video_list, remark=remark, keywords=keywords, addtime=addtime, params=more_info)
    # test save; keep commented out in production
    # ********************************************************
    # with open(download_path+"data.json", 'wb+') as target:#
    #     target.write(article_data.encode('utf-8'))        #
    # ********************************************************
    # remember the title so this article is not collected twice
    with open(log_path+'added.txt', 'a+') as target:
        target.write(title+"||")
    return article_data


def saveFailInfo(info):
    '''
    Save an error message (currently disabled).
    @param {string} info error message
    '''
    # NOTE(review): this early return makes the write below unreachable —
    # failure logging is effectively switched off. Presumably deliberate;
    # delete the `return` to re-enable failed.txt. TODO confirm.
    return
    with open(download_path+'failed.txt', 'a+') as target:
        target.write(info+'\r\n')


def articleListToJson(articles, datas, headers, cid='1'):
    '''
    Convert one category's article URLs into JSON records stored in *datas*.

    @param articles {list}: article URLs to collect
    @param datas {dict}: accumulator, keyed by the running article counter
    @param headers {dict}: HTTP headers used for the requests
    @param cid {str}: column id attached to every record
    '''
    global article_count
    for url in articles:
        record = saveArticle(url, cid=cid, headers=headers, publisher='杜成龙')
        if record:
            # key records by the global running counter
            datas[str(article_count)] = record
            article_count += 1
    print("采集{}篇".format(article_count))
    return


# ********************************main program starts********************************

# download directories: one spider dir per date+hour, one log dir per week
download_path = os.getcwd()+'/news/'+date+hours+"spider/"
log_path = os.getcwd()+'/news/'+year+week+"log/"
download_asset_path = download_path+'asset/'
# desktop-browser UA so the site serves the full pages
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66"
}


# download categories
# 1 domestic news; 2 international news; 3 domestic military; 4 international military; 5 comments
url = "http://www.xinhuanet.com/"
print("资源下载目录:{}".format(download_path))
print("当前关键字分隔符为‘{}’".format(KEYWORD_SEPARATOR))
# make sure the download/log directories exist
if not (os.path.exists(download_asset_path)):
    os.makedirs(download_asset_path)
if not (os.path.exists(log_path)):
    os.makedirs(log_path)
# collection starts below
if __name__ == "__main__":
    # only collect articles dated today
    timestring = year+'-'+month+'-'+day
    # timestring=[yesterday,today]
    # timestring = "2020-09-23"
    datas = {}  # final data
    article_count = 0  # running count of collected articles
    # comments category
    # NOTE(review): `articles` is never cleared between categories, so each
    # articleListToJson call re-walks all accumulated URLs; duplicates are
    # filtered via added.txt inside saveArticle — presumably intentional
    articles = []
    cate_url = "http://www.news.cn/world/pl.htm"
    html = getHtml(cate_url, headers)
    comment_article = html.xpath(
        '//ul[@class="dataList"]/li[@class="clearfix"]/descendant::h3/a/@href')
    # comment_article = html.xpath(
    #     '//ul[@class="dataList"]/li[@class="clearfix"]/descendant::h3/a/text()')
    comment_date = html.xpath(
        '//ul[@class="dataList"]/li[@class="clearfix"]/descendant::span[@class="time"]/text()')
    counts = min(len(comment_article), len(comment_date))
    for each in range(counts):
        if comment_date[each] == timestring:
            articles.append(comment_article[each])
    print(articles)
    # web commentary category
    print("评论采集")
    cate_url = "http://www.xinhuanet.com/comments/ej.htm?page=zjmp"
    html = getHtml(cate_url, headers)
    comment_article = html.xpath(
        '//ul[@class="dataList"]/li[@class="clearfix"]/descendant::h3/a/@href')
    # comment_article = html.xpath(
    #     '//ul[@class="dataList"]/li[@class="clearfix"]/descendant::h3/a/text()')
    comment_date = html.xpath(
        '//ul[@class="dataList"]/li[@class="clearfix"]/descendant::span[@class="time"]/text()')
    counts = min(len(comment_article), len(comment_date))
    # print(counts)
    for each in range(counts):
        if comment_date[each] == timestring:
            if comment_article[each] in articles:
                continue
            articles.append(comment_article[each])
    # print(articles)
    # build the JSON data
    articleListToJson(articles, datas, headers, cid='13')
    # comments END
    # domestic military
    print("国内军事采集")
    cate_url = "http://www.xinhuanet.com/mil/"
    r = requests.get(cate_url, headers=headers)
    html = etree.HTML(r.content)
    cate_article = html.xpath(
        '//div[@class="y_box fr"]/div[@class="tab1"]/div[@class="tab_box"]/div[1]/ul[@class="list_show"]/li/a/@href')
    cate_date = html.xpath(
        '//div[@class="y_box fr"]/div[@class="tab1"]/div[@class="tab_box"]/div[1]/ul[@class="list_show"]/li/div[@class="time"]/text()')
    counts = min(len(cate_article), len(cate_date))
    # print(counts)
    for each in range(counts):
        # military list dates carry a time part; compare the date only
        if cate_date[each].split()[0] == timestring:
            if cate_article[each] in articles:
                continue
            articles.append(cate_article[each])
    # print(cate_article)
    # build the JSON data
    articleListToJson(articles, datas, headers, cid='3')
    # domestic military END
    # international military
    print("国际军事采集")
    cate_url = "http://www.xinhuanet.com/mil/"
    r = requests.get(cate_url, headers=headers)
    html = etree.HTML(r.content)
    cate_article = html.xpath(
        '//div[@class="y_box fr"]/div[@class="tab1"]/div[@class="tab_box"]/div[2]/ul[@class="list_show"]/li/a/@href')
    # cate_article = html.xpath(
    # '//div[@class="y_box fr"]/div[@class="tab1"]/div[@class="tab_box"]/div[2]/ul[@class="list_show"]/li/h2/a/text()')
    cate_date = html.xpath(
        '//div[@class="y_box fr"]/div[@class="tab1"]/div[@class="tab_box"]/div[2]/ul[@class="list_show"]/li/div[@class="time"]/text()')
    counts = min(len(cate_article), len(cate_date))
    # print(counts)
    for each in range(counts):
        # print(cate_date[each].split()[0])
        if cate_date[each].split()[0] == timestring:
            if cate_article[each] in articles:
                continue
            articles.append(cate_article[each])
    # print(articles)
    # build the JSON data (this category's upload is currently disabled)
    # articleListToJson(articles, datas, headers, cid='339')
    # international military END
    # international news
    print("国际新闻采集")
    cate_url = "http://www.news.cn/world/wmyl.htm"
    html = getHtml(cate_url, headers)
    cate_article = html.xpath(
        '//ul[@class="dataList"]/li[@class="clearfix"]/descendant::h3/a/@href')
    # cate_article = html.xpath(
    #     '//ul[@class="dataList"]/li[@class="clearfix"]/descendant::h3/a/text()')
    cate_date = html.xpath(
        '//ul[@class="dataList"]/li[@class="clearfix"]/descendant::span[@class="time"]/text()')
    counts = min(len(cate_article), len(cate_date))
    # print(counts)
    for each in range(counts):
        if cate_date[each] == timestring:
            if cate_article[each] in articles:
                continue
            articles.append(cate_article[each])
    # print(articles)
    # build the JSON data
    articleListToJson(articles, datas, headers, cid='178')
    # international news END
    # front-page news (no date filter: everything currently linked)
    print("首页新闻采集")
    cate_url = "http://www.xinhuanet.com/"
    html = getHtml(cate_url, headers)
    cate_article = html.xpath('//div[@id="focusItem"]/descendant::a/@href') + \
        html.xpath('//div[@id="hpart2L"]/descendant::a/@href')
    counts = len(cate_article)
    for each in range(counts):
        if cate_article[each] in articles:
            continue
        articles.append(cate_article[each])
    # build the JSON data
    articleListToJson(articles, datas, headers, cid='177')
    # print(articles)
    # headline
    print("头条采集")
    cate_article = html.xpath('//div[@id="hpart1"]/div/h1/span/a/@href')
    counts = len(cate_article)
    for each in range(counts):
        if cate_article[each] in articles:
            continue
        articles.append(cate_article[each])
    # articles.append("http://www.xinhuanet.com/politics/leaders/2020-12/25/c_1126908944.htm")
    articleListToJson(articles, datas, headers, cid='177')
    # front-page news END
    # *******************************save**************************************
    datas = json.dumps(datas, ensure_ascii=False)
    # length is never 0 here: dumps adds the surrounding braces, so an
    # empty dict serialises to "{}" (length 2)
    if len(datas) > 2:
        with open(download_path+'xinhua_datas.json', 'wb+') as target:
            target.write(datas.encode('utf-8'))
        print("成功保存{}篇文章".format(article_count))
        # NOTE(review): count.txt must already exist — this 'r' open raises
        # FileNotFoundError on a fresh log dir; TODO confirm it is seeded
        with open(log_path+'count.txt', 'r') as target:
            saved = target.read()
        with open(log_path+'count.txt', 'w') as target:
            target.write(str(int(article_count)+int(saved)))
    else:
        print("没有需要更新的文章")

    # *******************************save END***********************************
    # single-article test
    # article_url = "http://www.xinhuanet.com/politics/leaders/2020-09/23/c_1126532349.htm"
    # html = getHtml(article_url)
    # content = html.xpath(
    #     '//div[@id="p-detail"]/div[@class="main-aticle"]|//div[@id="p-detail"]')[0]
    # test = etree.tostring(content, encoding="utf-8")
    # with open(download_path+'1.html', 'a+') as target:
    #     target.write(test.decode('utf-8'))
    # test=saveArticle(article_url=article_url,headers=headers,publisher='杜成龙')
    # with open(download_path+'datas.json', 'wb+') as target:
    #     target.write(test.encode('utf-8'))
# print(test)
