# -*- coding: utf-8 -*-

# Author: 一根鱼骨棒
# Date: 2020-09-23 16:17:06
# LastEditTime: 2021-07-20 08:51:23
# LastEditors: 一根鱼骨棒
# Description: 本开源代码使用GPL 3.0协议
# Software: VScode
# Copyright 2020 迷舍

from function import *


def getList(cate_id=''):
    '''
    Fetch one page of the article list for a category from the Xinhua node API.

    @param cate_id : category node id inserted into the API query string
    @return {list} : de-duplicated article URLs, in the order the API returned them
    '''
    cate_url = ("http://da.wa.news.cn/nodeart/page?nid=" + cate_id
                + "&pgnum=1&cnt=10&attr=&tp=1&orderby=1")
    json_data = getJson(cate_url)['data']['list']
    print(len(json_data))
    articles = []
    seen = set()  # O(1) membership test instead of re-scanning the list each time
    for item in json_data:
        url = item['LinkUrl']
        if url not in seen:
            seen.add(url)
            articles.append(url)
    return articles

# 处理图片


def dealPicture(content, source_url='', target_url=TARGET_URL):
    '''
    Download the images referenced in the article body and rewrite each
    <img> line into a centered placeholder pointing at target_url.

    @param content : list of text lines (one HTML fragment per line)
    @param source_url : base URL used to resolve relative image paths
    @param target_url : URL prefix written into the rewritten <img> tags
    @return {dict} : {'thumb': thumbnail URL or '', 'content': rewritten line list}
    '''
    img_pattern = r'<img.*src="(.*?)".*?'
    new_content = []
    img_list = []
    thumb = ''
    for eachline in content:
        img = re.search(img_pattern, eachline)
        if img is not None:
            # Extract the image src (reuse the match object instead of
            # running the same regex a second time).
            img_src = img.group()
            img_src = re.search(r'.+?src="(\S+)"', img_src).group()
            img_src = img_src.split('src=')[1].strip('"')
            if "space.gif" in img_src:
                # space.gif is a layout spacer: drop the whole line
                eachline = ""
            else:
                # Replace the original tag with one pointing at our mirror
                eachline = ("\n<p style='text-align:center;'><img src='"
                            + target_url + img_src.split('/')[-1] + "' /></p>")
                img_list.append(img_src)
        new_content.append(eachline)
    #########image list debug output#########
    if DEBUG:
        print(img_list)
    ##########################################
    for img in img_list:
        if '//' in img:
            # Absolute URL: force http scheme, keep only the file name locally
            img_url = "http://" + img.split("//")[-1]
            img = img.split('/')[-1]
        else:
            img_url = source_url + img
        # BUG FIX: check the skip conditions BEFORE downloading — the old
        # code fetched the image bytes and then threw them away when the
        # file already existed or downloading was disabled.
        if os.path.exists(download_asset_path + img) or not DOWNLOAD_PIC:
            continue
        try:
            img_data = requests.get(img_url, headers=headers).content
        except requests.exceptions.ConnectionError:
            print('Error:图片资源下载失败:' + img_url)
            continue
        try:
            with open(download_asset_path + img, "wb") as target:
                target.write(img_data)
        except OSError:
            print("图片保存异常")
    if DOWNLOAD_PIC:
        # Normalize image sizes
        resizePicture(img_list)
        # Pick a thumbnail
        thumb = setThumb(img_list)
    return {'thumb': thumb, 'content': new_content}


# 处理视频


def dealVideo(content, source_url='', target_url=TARGET_URL):
    '''
    Placeholder for video handling: passes the content through untouched
    and reports an empty video list.

    TODO: download videos, rewrite their addresses, insert the player script.

    @param content : list of text lines
    @return {dict} : {'video_list': [], 'content': content unchanged}
    '''
    return {"video_list": [], "content": content}

# 清理文章中不必要的内容


def cleanContent(content):
    '''
    Strip blank lines, HTML comments and trailer boilerplate from the body.

    Processing stops completely at the first trailer marker (editor credit,
    related-links section, share widget, ...); everything after it is junk.

    @param content : list of text lines
    @return {list} : cleaned line list
    '''
    stop_markers = ("责任编辑", "延伸阅读", "链接：", "链接:", "相关新闻", "【纠错】", "分享到")
    cleaned = []
    for line in content:
        line = line.strip()
        # Rewrite the special-topic link to the local mirror path
        line = line.replace("http://www.12371.cn/special/dsbn/",
                            '/tbch/zdjr/jiandang2021/dsbn/index.shtml')
        # Drop single-line HTML comments
        if line.startswith("<!--") and line.endswith("-->"):
            continue
        # Trailer marker found: the article body has ended
        if any(marker in line for marker in stop_markers):
            break
        # Keep lines carrying Chinese text (ignoring the 楷体 font tag),
        # image tags, or inline video ids; drop gallery / mp4 script lines.
        if hasChinese(line.replace("楷体", '')) or ("img" in line) or ('id="video' in line):
            if ("图集" in line) or ("videoMP4" in line):
                continue
            cleaned.append(line)
    return cleaned

# 设置文章正文信息


def setContent(title, content, article_url="", target_url=TARGET_URL):
    '''
    Run the full content pipeline: split the raw body into lines, process
    pictures and videos, clean out boilerplate, then re-join the text.

    @param title : article title (kept for interface parity)
    @param content : raw article body as a single string
    @param article_url : source page URL, used to resolve relative image paths
    @return {dict} : {'thumb': url or '', 'video_list': list, 'content': string}
    '''
    # Directory part of the article URL — absolute base for relative images
    source_url = article_url.replace(article_url.split('/')[-1], '')
    # TODO handle paginated articles
    # "&#13;" separates the original lines; split into a list
    lines = content.split("&#13;")
    # Save pictures and insert their placeholders
    picture_result = dealPicture(lines, source_url, TARGET_URL)
    # Save videos and insert their placeholders (currently a passthrough)
    video_result = dealVideo(picture_result['content'], source_url, TARGET_URL)
    # Remove blank lines, comments and trailer boilerplate
    cleaned = cleanContent(video_result['content'])
    # Note: the result stays a str — json.dumps later must not get bytes
    return {
        'thumb': picture_result['thumb'],
        'video_list': video_result['video_list'],
        'content': "".join(cleaned),
    }


# 设置单篇文章信息


def setArticleInfo(title, cid="1", typeid="1", thumb="", video="", keywords="", remark="", content="", block="0", modelid=MODEL_ID, author="", source="新华网", addtime="", updatetime=None, **params):
    '''
    Assemble the article attributes into a JSON string.

    @param cid : target column id
    @param typeid : content type id
    @param block : recommendation slot
    @param modelid : CMS model id
    @param author : publisher
    @param source : news source
    @param title : title
    @param content : article body
    @param keywords : keyword string
    @param remark : summary
    @param {url} thumb : thumbnail
    @param {timestamp} addtime : creation time
    @param {timestamp} updatetime : modification time; defaults to "now"
    @param {dict} **params : any extra fields to merge into the record
    @return {str} : JSON-encoded article record (Chinese kept unescaped)
    '''
    # TODO set the focus image
    # BUG FIX: the old default `updatetime=str(int(time.time()))` was
    # evaluated ONCE at import time, so every article in a run shared the
    # same modification timestamp. Compute it per call instead.
    if updatetime is None:
        updatetime = str(int(time.time()))
    result = {
        "cid": cid,
        "typeid": typeid,
        "block": block,
        "modelid": modelid,
        "title": title,
        "keywords": keywords,
        "remark": remark,
        "thumb": thumb,
        "author": author,
        "source": source,
        "addtime": addtime,
        "video": video,
        "updatetime": updatetime,
        "content": content,
        **params,
    }
    # Keep Chinese characters unescaped in the output
    return json.dumps(result, ensure_ascii=False)


def saveArticle(article_url="", cid="1", headers="", **params):
    '''
    Scrape a single article page and return its JSON record.

    Returns None (skips the article) when the page cannot be fetched, the
    title or body cannot be parsed, or the title is already in the
    download log (unless DEBUG is on).

    @param article_url : article page URL
    @param cid : target column id
    @param headers : HTTP headers for the request
    @return {str|None} : JSON article data, or None when skipped
    '''
    if DEBUG:
        print("当前处理:"+article_url)
    try:
        html = getHtml(article_url, headers)
    except Exception:
        # BUG FIX: the old `except: pass` left `html` unbound, so the
        # xpath call below crashed with NameError. Record and skip instead.
        saveFailInfo("URL:"+article_url)
        return
    # Raw title: the text between the repaste.title marker comments
    try:
        title = html.xpath('//h1[@class="big_title"]')[0]
        title = etree.tostring(title, encoding="utf-8").decode('utf-8')
        title = title.split("<!--repaste.title.begin-->")[1].split("<!--repaste.title.end-->")[0]
    except Exception:
        saveFailInfo("URL:"+article_url)
        return
    if DEBUG:
        print(title)
    # Skip articles whose title is already in the download log
    with open(log_path + log_name, "a+") as added_title:
        added_title.seek(0, 0)
        added = added_title.read()
        if (title+"||") in added and not DEBUG:
            return

    # Publication time -> unix timestamp string
    try:
        addtime = html.xpath(
            '//i[@class="time"]/text()')[0].lstrip().rstrip()
        addtime = str(
            int(time.mktime(time.strptime(addtime, "%Y-%m-%d %H:%M:%S"))))
        print(addtime)
    except Exception:
        # No timestamp on the page: fall back to "now"
        addtime = str(int(time.time()))
    # Raw article body element
    try:
        content = html.xpath(
            '//div[@id="font_area"]')[0]
    except IndexError:
        print("正文未采集到:"+article_url)
        return
    print("准备采集:"+article_url)
    print(title)
    # Audio track: extract the CNTV pid embedded between the
    # repaste.video marker comments and resolve it via getIpadVideoInfo
    try:
        req = requests.get(article_url).content
        flag = int(req.decode('utf-8').index("<!--repaste.video.code.begin-->"))
        ids = req.decode('utf-8')[flag+31:flag+63]
        audio_url = "http://vdn.apps.cntv.cn/api/getIpadVideoInfo.do?pid="+ids
        audio_json = requests.get(audio_url).content.decode('utf-8').split("html5VideoData = '")[1].replace("';getHtml5VideoData(html5VideoData);", "").strip()
        audio_json = json.loads(audio_json)
        audio_url = audio_json['video']['chapters'][0]['url']
        audio_title = audio_json['title']+'.mp3'
        print('audio_url:'+audio_url)
        # Hand the mp3 download to IDM in a separate process
        Process(target=idmDownloader, args=(IDM, audio_url, download_asset_path, audio_title)).run()
    except Exception:
        audio_url = ""
        audio_title = ""
        print("audio null:"+article_url)

    # Body element -> utf-8 string
    source_content = etree.tostring(content, encoding="utf-8")
    # Run the full cleaning/rewriting pipeline on the body
    content_clean_result = setContent(title, source_content.decode(
        "utf-8"), article_url=article_url, target_url=TARGET_URL)
    true_content = content_clean_result['content']
    # Thumbnail chosen from the downloaded images
    thumb = content_clean_result['thumb']
    # Video (and audio) addresses collected during cleaning
    video_list = content_clean_result['video_list']
    if audio_title:
        video_list.append(TARGET_URL+audio_title)
    print(video_list)
    # Summary and keywords derived from the cleaned text
    remark = setSummary(true_content)
    keywords = setKeywords(true_content, separator=KEYWORD_SEPARATOR)
    # Build the final per-article JSON record
    article_data = setArticleInfo(cid=cid,
                                  title=title, content=true_content, thumb=thumb, video=video_list, remark=remark, keywords=keywords, addtime=addtime, params=params)
    # Append the title to the download log so it is not collected again
    with open(log_path+log_name, 'a+') as target:
        target.write(title+"||")
    return article_data


def articleListToJson(articles, datas, headers, cid='1'):
    '''
    Convert every article of one column to JSON and store it in datas.

    Keys are consecutive indices taken from the global article_count,
    which advances once per successfully saved article.
    '''
    global article_count
    for url in articles:
        article_json = saveArticle(url, cid=cid,
                                   headers=headers, publisher='杜成龙')
        if article_json:
            datas[str(article_count)] = article_json
            article_count += 1
    print("采集{}篇".format(article_count))
    return


# ********************************main program start********************************

# Download directory for this run (dated subfolder under ./news/)
download_path = os.getcwd()+'/news/'+date+hours+"spider/"
# Log file recording titles already downloaded this year
log_name='party'+year+"added.txt"
# Assets (images/audio) land next to the articles
download_asset_path = download_path+'asset/'



# Report the run configuration and make sure the directories exist
print("资源下载目录:{}".format(download_path))
print("当前关键字分隔符为‘{}’".format(KEYWORD_SEPARATOR))
if not (os.path.exists(download_asset_path)):
    os.makedirs(download_asset_path)
if not (os.path.exists(log_path)):
    os.makedirs(log_path)
# 以下为采集
if __name__ == "__main__":
    timestring = year+'-'+month+'-'+day
    # timestring=[yesterday,today]
    # timestring = "2020-09-23"
    datas = {}  # final collected data, keyed by running article index
    article_count = 0  # counter of successfully collected articles
    articles = []
    # "Century moments" column of the party-history centennial special
    print("党史百年")
    cate_url = "http://www.12371.cn/special/dsbn/"
    r = requests.get(cate_url, headers=headers)
    html = getHtml(cate_url)
    # Article links inside the "show more" lists of the column wrapper
    cate_article = html.xpath(
        '//div[@class="column_wrapper_1200"]/descendant::ul[@class="showMoreNChildren"]/li/a/@href')

    counts = len(cate_article)
    for each in range(counts):
        articles.append(cate_article[each])
    # Build the JSON records for every article in the column
    articleListToJson(articles, datas, headers, cid='708')
    # "Century moments" END

    # *******************************save**************************************
    datas = json.dumps(datas, ensure_ascii=False)

    if len(datas) > 2:     # dumps of an empty dict is "{}" (length 2)
        with open(download_path+'dsbn.json', 'wb+') as target:
            target.write(datas.encode('utf-8'))
        print("成功保存{}篇文章".format(article_count))
        # NOTE(review): assumes log_path/count.txt already exists with a
        # number in it — first run will raise FileNotFoundError; verify.
        with open(log_path+'count.txt', 'r') as target:
            saved = target.read()
        with open(log_path+'count.txt', 'w') as target:
            target.write(str(int(article_count)+int(saved)))
    else:
        print("没有需要更新的文章")

    # *******************************save END***********************************
    # single-article test
    # article_url = "http://www.12371.cn/2021/02/25/VIDE1614212641174507.shtml"
    # req=requests.get(article_url).content
    # html = etree.HTML(req)
    # title = html.xpath('//h1[@class="big_title"]')[0]
    # title=etree.tostring(title, encoding="utf-8").decode('utf-8')
    # title=title.split("<!--repaste.title.begin-->")[1].split("<!--repaste.title.end-->")[0]
    # content = html.xpath('//div[@id="font_area"]')[0]
    # test = etree.tostring(content, encoding="utf-8")
    # with open(download_path+'1.html', 'wb+') as target:
        # target.write(test)
    # test=saveArticle(article_url=article_url,headers=headers,publisher='杜成龙')
    # with open(download_path+'datas.json', 'wb+') as target:
        # target.write(test)
    # print(test)