# -*- coding: utf-8 -*-

# Author: 一根鱼骨棒
# Date: 2020-09-26 20:55:33
# LastEditTime: 2022-02-23 17:28:07
# LastEditors: 一根鱼骨棒
# Description: 本开源代码使用GPL 3.0协议
# Software: VScode
# Copyright 2020 迷舍

from function import *

# Image handling


def dealPicture(content, source_url='', target_url=TARGET_URL):
    '''
    Download images and rewrite image placeholders in the content.\n
    @param content : list of content lines\n
    @param source_url : base URL used to resolve relative image paths\n
    @param target_url : URL prefix used when rewriting <img> tags\n
    @return {dict} : {'thumb': thumbnail, 'content': new content line list}\n
    '''
    img_pattern = r'<img.*src="(.*?)".*?'
    new_content = []
    img_list = []
    thumb = ''
    desc = ""
    for eachline in content:
        img = re.search(img_pattern, eachline)
        if img is not None:
            # Skip the ad-list comment section
            if "<!--article_adlist" in eachline:
                continue
            # On Sina pages the image caption sits on the same line as the image
            if "img_descr" in eachline:
                desc = eachline.split('<span class="img_descr')[-1].replace(
                    "</span>", "").replace("</div>", "").strip('"').strip('/').strip('>')
                print(str(desc))
            else:
                desc = ""
            # Extract the image src (reuse the match found above
            # instead of re-searching the same pattern twice)
            img_src = re.search(r'.+?src="(\S+)"', img.group()).group()
            img_src = img_src.split('src=')[1].strip('"')
            # Replace the line with a centred <img> pointing at target_url
            if "space.gif" in img_src:
                # spacer images are dropped entirely
                eachline = ""
            else:
                eachline = "\n<p style='text-align:center;'><img src='" + \
                    target_url+img_src.split("/")[-1]+"' /></p>"
                img_list.append(img_src)
        new_content.append(eachline)
        if desc != "":
            describe = "\n<p style='text-align:center;'>"+desc+"</p>"
            new_content.append(describe)
            desc = ""
    ######### image list debug output #########
    if DEBUG:
        print(img_list)
    ###########################################
    for img in img_list:
        if '//' in img:
            img_url = "http://"+img.split("//")[-1]
            img = img.split('/')[-1]
        else:
            img_url = source_url+img
        # BUG FIX: check BEFORE downloading - the original issued the HTTP
        # request even when the file already existed or downloads were off.
        if os.path.exists(download_asset_path+img) or not DOWNLOAD_PIC:
            continue
        img_data = requests.get(img_url, headers=headers).content
        try:
            with open(download_asset_path+img, "wb") as target:
                target.write(img_data)
        except OSError:
            # narrowed from a bare except: only file-system errors are expected
            print("图片保存异常")
    if DOWNLOAD_PIC:
        # Normalise image sizes
        resizePicture(img_list)
        # Pick the thumbnail
        thumb = setThumb(img_list)
    return {'thumb': thumb, 'content': new_content}


# Video handling
def dealVideo(content, source_url='', target_url=TARGET_URL):
    '''
    TODO video saving, video URLs, script insertion.
    Download videos and rewrite video placeholders in the content.\n
    @param content : list of content lines\n
    @param source_url : unused here; kept for signature symmetry with dealPicture\n
    @param target_url : URL prefix recorded for each downloaded video\n
    @return {dict} : {'video_list': target video URLs, 'content': new content line list}\n
    '''
    video_pattern = r'video_id:.*(.*?),'
    new_content = []
    video_count = 1
    video_list = []
    for eachline in content:
        video = re.search(video_pattern, eachline)
        if video is not None:
            # Extract the video id (reuse the match instead of re-searching)
            video_id = video.group().strip()
            video_id = video_id.split('video_id:')[1].strip().split(',')[0].strip()
            video_src = 'https://api.ivideo.sina.com.cn/public/video/play?video_id='+video_id + \
                '&appver=V11220.200803.02&appname=sinaplayer_pc&applt=web&tags=sinaplayer_pc&player=all'
            # video_src is always a non-empty https URL here, so the original
            # `video_src != " " and ("http" in video_src)` guards were dead code.
            if DOWNLOAD_VIDEO:
                data = requests.get(video_src, headers=headers).text
                data = json.loads(data)
                # Resolve the real media URL from the player API response
                video_url = data["data"]["videos"][0]['dispatch_result']['url'].replace(
                    r"\u0026", "&")
                video_title = video_id+'.'+data["data"]["videos"][0]['type']
                # BUG FIX: record against the target_url parameter instead of
                # the hard-coded TARGET_URL constant (same default value).
                video_list.append(target_url+video_title)
                # NOTE(review): Process(...).run() executes the target in THIS
                # process, synchronously - no child process is spawned. Kept
                # as-is; use .start()/.join() if parallelism is intended.
                Process(target=idmDownloader, args=(
                    IDM, video_url, download_asset_path, video_title)).run()
            # Insert the placeholder; the first video slot is emitted empty
            if video_count == 1:
                eachline = ""
            else:
                eachline = "\n<div id=video"+str(video_count)+"></div>"
            video_count = video_count+1
        new_content.append(eachline)
    return {"video_list": video_list, "content": new_content}

# Strip unnecessary content from the article


def cleanContent(content):
    '''
    Remove blank lines, HTML comments and end-of-article boilerplate.\n
    @param content : list of content lines\n
    @return {list} : filtered content line list\n
    '''
    # Any of these markers means the article body has ended
    flags = ["责任编辑", "延伸阅读", "新闻链接", "相关链接",
             "相关新闻", "【纠错】", "分享到", "免责声明", "图片来自网络"]
    new_content = []
    for eachline in content:
        # BUG FIX: the original called eachline.strip() and discarded the
        # result, so the comment check below never saw the stripped text.
        # Strip only for the check so kept lines stay byte-identical.
        stripped = eachline.strip()
        # Drop HTML comments
        if stripped.startswith("<!--") and stripped.endswith("-->"):
            continue
        # TODO properly strip script content
        if "//" in eachline and hasChinese(eachline):
            continue
        # Stop at the first end-of-article marker
        if any(flag in eachline for flag in flags):
            break
        # Keep only lines with Chinese text, images or video placeholders
        if hasChinese(eachline) or ("img" in eachline) or ("id=video" in eachline):
            new_content.append(eachline)
    return new_content

# Build the article body content


def setContent(title, content, article_url="", target_url=TARGET_URL):
    '''
    Clean the raw article HTML and collect its media.\n
    @param title : article title (currently unused here)\n
    @param content : raw article HTML string\n
    @param article_url : article URL, used to resolve relative image paths\n
    @param target_url : URL prefix for rewritten media references\n
    @return {dict} : {'thumb': ..., 'video_list': ..., 'content': cleaned string}\n
    '''
    # Absolute base URL for image downloads (article URL minus its last path segment)
    source_url = article_url.replace(article_url.split('/')[-1], '')
    # '\n' separates the lines; split into a list for per-line processing
    content = content.split('\n')  # list
    # Save images and insert placeholders.
    # BUG FIX: forward the target_url parameter instead of the hard-coded
    # TARGET_URL constant (the parameter was silently ignored).
    picture_deal_result = dealPicture(content, source_url, target_url)
    content = picture_deal_result['content']
    # Thumbnail
    thumb = picture_deal_result['thumb']
    # Save videos and insert placeholders
    video_deal_result = dealVideo(content, source_url, target_url)
    content = video_deal_result['content']
    # Video URL list
    video_list = video_deal_result['video_list']
    # Remove blank lines and comments
    content = cleanContent(content)
    # Join the list back into a single string
    content = "".join(content)
    # Kept as str: the json serialization step cannot take encoded bytes
    return {'thumb': thumb, "video_list": video_list, 'content': content}


# Build a single article's metadata


def setArticleInfo(title, cid="1", typeid="1", thumb="", video="", keywords="", remark="", content="", block="0", modelid=MODEL_ID, author="", source="新浪军事", addtime="", updatetime=None, publisher='杜成龙', **params):
    '''
    Assemble the article attributes into a JSON string.\n
    @param title : title\n
    @param cid : article column\n
    @param typeid : model type\n
    @param thumb : {url} thumbnail\n
    @param video : video URL list\n
    @param keywords : keywords\n
    @param remark : summary\n
    @param content : article body\n
    @param block : recommendation slot\n
    @param modelid : model id\n
    @param author : author\n
    @param source : source site\n
    @param addtime : {timestamp} creation time\n
    @param updatetime : {timestamp} modification time; defaults to "now"\n
    @param publisher : publisher name\n
    @param {dict} **params : extra fields merged into the result\n
    @return {json} result : the final result as a JSON string\n
    '''
    # TODO set the focus image
    # BUG FIX: the original default str(int(time.time())) was evaluated once
    # at import time, stamping every article with module-load time; compute
    # it per call instead (None sentinel keeps the interface compatible).
    if updatetime is None:
        updatetime = str(int(time.time()))
    # (The original's block of no-op self-assignments was removed.)
    result = {
        "cid": cid,
        "typeid": typeid,
        "block": block,
        "modelid": modelid,
        "title": title,
        "keywords": keywords,
        "remark": remark,
        "thumb": thumb,
        "author": author,
        "source": source,
        "addtime": addtime,
        "video": video,
        "updatetime": updatetime,
        "content": content,
        "publisher": publisher,
        **params,
    }
    # Keep Chinese characters unescaped in the output
    result = json.dumps(result, ensure_ascii=False)
    return result


def saveArticle(article_url="", cid="1", headers="", **params):
    '''
    Fetch, clean and serialize a single article.
    Swap out this function's XPath logic to collect a different site.\n
    @param article_url : URL of the article to collect\n
    @param cid : target column id\n
    @param headers : HTTP request headers\n
    @param {dict} **params : extra article fields forwarded to setArticleInfo\n
    @return {json|None} : serialized article data, or None when skipped/failed\n
    '''
    if DEBUG:
        print("当前处理:"+article_url)
    try:
        html = getHtml(article_url, headers)
    except Exception:
        # BUG FIX: the original fell through with `html` unbound and crashed
        # with NameError on the next access; bail out instead.
        print("html获取失败")
        return
    # Raw title, trimmed of surrounding whitespace
    try:
        title = html.xpath(
            '//h1[@class="main-title"]/text()')[0].lstrip().rstrip().replace(r'[^\u4e00-\u9fa50-9]', '')
        # Workaround: "gbk' codec can't encode character '\xa0'"
        title = title.replace(u'\xa0', u'')
        if DEBUG:
            print("原始获取的标题:"+title)
        # Skip articles that were already downloaded
        with open(log_path + log_name, "a+") as added_title:
            added_title.seek(0, 0)
            added = added_title.read()
            if title in added:
                return
    except Exception:
        # narrowed from a bare except; log the failure for later retry
        print("采集失败，URL: "+article_url)
        saveFailInfo("URL:"+article_url)
        return
    # Publication time -> unix timestamp string
    try:
        addtime = html.xpath(
            '//div[@class="date-source"]/*[@class="date"]/text()')[0].lstrip().rstrip()
        if DEBUG:
            print(addtime)
        addtime = str(
            int(time.mktime(time.strptime(addtime, "%Y年%m月%d日 %H:%M"))))
        if DEBUG:
            print(addtime)
    except (IndexError, ValueError, OverflowError):
        # Fall back to the current time when no date could be parsed
        addtime = str(int(time.time()))
    # Source site
    try:
        source = html.xpath(
            '//div[@class="date-source"]/*[@class="source"]/text()')[0].lstrip().rstrip()
    except IndexError:
        source = "新浪军事"
    if DEBUG:
        print("source:"+source)
    # Raw body element
    try:
        content = html.xpath(
            '//div[@id="article"]')[0]
    except IndexError:
        print("正文未采集到")
        return
    print("准备采集:"+article_url)
    print(title)
    # Serialize the element to utf-8 text
    source_content = etree.tostring(content, encoding="utf-8")
    # Clean the content and collect media
    content_clean_result = setContent(title, source_content.decode(
        "utf-8"), article_url=article_url, target_url=TARGET_URL)
    true_content = content_clean_result['content']
    # Thumbnail
    thumb = content_clean_result['thumb']
    # Video URL list
    video_list = content_clean_result['video_list']
    # Summary
    remark = setSummary(true_content)
    # Keywords taken from the article's own tag links
    keywords = html.xpath(
        '//*[@id="article-bottom"]/div[@class="keywords"]/a/text()')
    keywords = KEYWORD_SEPARATOR.join(keywords)
    # BUG FIX: forward the extra fields as real keyword arguments; the
    # original passed params=more_info, which nested them under a literal
    # "params" key in the output JSON instead of applying them.
    article_data = setArticleInfo(cid=cid,
                                  title=title, content=true_content, thumb=thumb,
                                  video=video_list, remark=remark, keywords=keywords,
                                  source=source, addtime=addtime, **params)
    # Record the title so the article is not collected twice
    with open(log_path+log_name, 'a+') as target:
        try:
            target.write(title+"||")
        except UnicodeEncodeError as e:
            print(e)
    return article_data


def articleListToJson(articles, datas, headers, cid='1'):
    '''
    Serialize one column's articles into `datas`, keyed by a running counter.
    '''
    global article_count
    for url in articles:
        article = saveArticle(url, cid=cid,
                              headers=headers, publisher='杜成龙')
        # Skipped / failed articles return a falsy value and are not stored
        if article:
            datas[str(article_count)] = article
            article_count += 1
    print("采集{}篇".format(article_count))
    return


# ********************************main program********************************


# Collection entry point
if __name__ == "__main__":
    print("资源下载目录:{}".format(download_path))
    print("当前关键字分隔符为‘{}’".format(KEYWORD_SEPARATOR))
    filename = 'xinlang_datas.json'
    # NOTE(review): timestring is built here but never used below
    timestring = year+'-'+month+'-'+day
    datas = {}  # final collected data
    article_count = 0  # number of collected articles
    # International military news collection
    articles = []
    cate_url = "http://mil.news.sina.com.cn/roll/index.d.html?cid=57919"
    html = getHtml(cate_url, headers)
    comment_article = html.xpath(
        '//div[@class="fixList"]/descendant::li/a/@href')
    comment_date = html.xpath(
        '//div[@class="fixList"]/descendant::li/a/text()')
    # Guard against mismatched href/text list lengths
    counts = min(len(comment_article), len(comment_date))
    for each in range(counts):
        articles.append(comment_article[each])
    # print(articles)
    # Build the JSON payload
    articleListToJson(articles, datas, headers, cid='339')
    # END
    # *******************************save**************************************
    datas = json.dumps(datas, ensure_ascii=False)
    # An empty dict dumps to "{}" (length 2), hence > 2 rather than > 0
    if len(datas) > 2:
        if os.path.isfile(filename):
            filename = filename+".new"
            print("资源文件已存在请检查，已将新文件重新命名为{}请处理冲突".format(filename))
        with open(download_path+filename, 'wb+') as target:
            target.write(datas.encode('utf-8'))
        print("成功保存{}篇文章".format(article_count))
        # Keep a running total of all collected articles
        if DEBUG:
            print("DEBUG模式不统计数据")
        else:
            with open(log_path+'count.txt', 'r') as target:
                saved = target.read()
            with open(log_path+'count.txt', 'w') as target:
                target.write(str(int(article_count)+int(saved)))
    else:
        print("没有需要更新的文章")

    # *******************************save END**********************************
    # Single-article test snippets, kept for manual debugging
    # article_url = "http://mil.news.sina.com.cn/roll/index.d.html?cid=57919"
    # article_url = "https://mil.news.sina.com.cn/2020-10-09/doc-iivhvpwz0987830.shtml"
    # r=requests.get(article_url)
    # html = getHtml(article_url)
    # content = html.xpath('//div[@id="article"]')[0]
    # content = etree.tostring(content, encoding="utf-8")
    # content=content.decode('utf-8').split('\n')
    # print(content)
    # content=saveArticle(article_url=article_url,headers=headers,publisher='杜成龙')
    # with open(download_path+'1.html', 'a+') as target:
    #     target.write(str(content))
    # test=saveArticle(article_url=article_url,headers=headers,publisher='杜成龙')
    # with open(download_path+'datas.json', 'wb+') as target:
    #     target.write(test.encode('utf-8'))
# print(test)
