# -*- coding: utf-8 -*-

# Author: 一根鱼骨棒
# Date: 2020-09-26 20:55:33
# LastEditTime: 2023-02-15 08:48:10
# LastEditors: 一根鱼骨棒
# Description: 本开源代码使用GPL 3.0协议
# Software: VScode
# Copyright 2020 迷舍

from function import *
from bs4 import BeautifulSoup
# 处理图片


def dealPicture(content, source_url='', target_url=TARGET_URL):
    '''
    Download the images referenced in the content and rewrite each <img>
    line into a centered placeholder that points at ``target_url``.

    @param content : list of HTML text lines\n
    @param source_url : URL prefix prepended to relative image paths\n
    @param target_url : URL prefix used in the rewritten <img> tags\n
    @return {dict} : {'thumb': thumbnail url or '', 'content': new line list}\n
    '''
    img_pattern = r'<img.*src="(.*?)".*?'
    new_content = []
    img_list = []
    thumb = ''
    for eachline in content:
        img = re.search(img_pattern, eachline)
        if img is not None:
            # Extract the src attribute (reuse the existing match object
            # instead of running the same search a second time).
            img_src = re.search(r'.+?src="(\S+)"', img.group()).group()

            if "../../" in img_src:
                img_src = img_src.split('src=')[1].strip('"').split("../../")[1]
            else:
                img_src = "_attachment" + img_src.split('src=')[1].strip('"').split("_attachment")[-1]

            # Spacer gifs are layout junk — drop the whole line.
            if "space.gif" in img_src:
                eachline = ""
            else:
                eachline = "\n<p style='text-align:center;'><img src='" + \
                    target_url + img_src.split('/')[-1] + "' /></p>"
                img_list.append(img_src)
        new_content.append(eachline)
    ######### image list debug output #########
    if DEBUG:
        print(img_list)
    ###########################################
    for img in img_list:
        # Build an absolute download URL; protocol-relative srcs get http://.
        if '//' in img:
            img_url = "http://" + img.split("//")[-1]
        else:
            img_url = source_url + img
        img = img.split('/')[-1]
        print(img_url)
        # BUGFIX: skip BEFORE downloading — the original fetched the bytes
        # with requests.get and then threw them away when the file already
        # existed or downloading was disabled, wasting bandwidth.
        if os.path.exists(download_asset_path + img) or not DOWNLOAD_PIC:
            continue
        img_data = requests.get(img_url, headers=headers).content
        try:
            with open(download_asset_path + img, "wb") as target:
                target.write(img_data)
        except OSError:
            # Narrowed from a bare except: only file-system errors expected.
            print("图片保存异常")
    if DOWNLOAD_PIC:
        # Normalize image sizes.
        resizePicture(img_list)
        # Pick the thumbnail.
        thumb = setThumb(img_list)
    return {'thumb': thumb, 'content': new_content}

# 处理视频


def dealVideo(content, source_url='', target_url=TARGET_URL):
    '''
    Download the videos referenced in the content and rewrite each video
    placeholder line into a <div id=videoN> anchor.

    @param content : list of text lines\n
    @param source_url : unused here; kept for signature parity with dealPicture\n
    @param target_url : URL prefix recorded for each downloaded video\n
    @return {dict} : {"video_list": list of target urls, "content": new line list}\n
    '''
    video_pattern = r'//Video.*(.*?)'
    new_content = []
    video_count = 1
    video_list = []
    for eachline in content:
        video = re.search(video_pattern, eachline)
        if video is not None:
            # Everything from "//Video" to the end of the line (reuse the
            # match instead of re-searching).
            video_src = video.group()
            if video_src != " " and ("http" in video_src) and DOWNLOAD_VIDEO:
                video_url = video_src.split("//Video")[-1].strip()
                video_title = video_url.split('/')[-1]
                print(video_url)
                print(video_title)
                # BUGFIX: use the target_url parameter; the original used the
                # TARGET_URL global, silently ignoring a caller-supplied prefix.
                video_list.append(target_url + video_title)
                # NOTE(review): Process(...).run() executes idmDownloader
                # synchronously in THIS process (no child is spawned); use
                # .start()/.join() if real parallelism is intended.
                Process(target=idmDownloader, args=(
                    IDM, video_url, download_asset_path, video_title)).run()
            # Insert the placeholder at the corresponding position; the first
            # marker line is removed entirely.
            if video_count == 1:
                eachline = ""
            else:
                eachline = "\n<div id=video" + str(video_count) + "></div>"
            video_count = video_count + 1
        new_content.append(eachline)
    return {"video_list": video_list, "content": new_content}

# 清理文章中不必要的内容


def cleanContent(content):
    '''
    TODO clean multi-line spans via tags
    Remove blank lines and comments from the article body.\n
    @param content : list of text lines\n
    @return {list} : cleaned list of lines\n
    '''
    flags = ["责任编辑", "延伸阅读", "新闻链接", "相关链接",
             "相关新闻", "【纠错】", "分享到", "免责声明", "扫描二维码"]
    comment_re = re.compile('<!--.*?-->')
    cleaned = []
    for line in content:
        # Strip inline HTML comments, then surrounding whitespace.
        line = comment_re.sub('', line).strip()
        # Unterminated comment openers without Chinese text are noise.
        if line.startswith("<!--") and not hasChinese(line):
            continue
        # Script-style "//" lines that contain Chinese are noise as well.
        if "//" in line and hasChinese(line):
            continue
        # Any trailer marker ("editor", "related links", ...) ends the body.
        if any(marker in line for marker in flags):
            break
        if line:
            cleaned.append(line)
    return cleaned

# 设置文章正文信息


def setContent(title, content, article_url="", target_url=TARGET_URL):
    '''
    Assemble the cleaned article body.\n
    @param title : article title\n
    @param content : raw HTML content string\n
    @param article_url : source page URL, used to derive the asset prefix\n
    @return {dict} : {'thumb': ..., 'video_list': ..., 'content': string}\n
    '''
    # Absolute prefix used when downloading relative image paths.
    source_url = re.search(r"http://www.81.cn/(.*?)/.*?", article_url).group()

    # Pretty-print so every tag sits on its own line, then work line-by-line.
    # TODO use bs4 for full content extraction/formatting
    lines = BeautifulSoup(content, 'html.parser').prettify().split('\n')

    # Save images and insert their placeholders.
    pic_result = dealPicture(lines, source_url, TARGET_URL)
    lines = pic_result['content']
    # Thumbnail chosen from the downloaded images.
    thumb = pic_result['thumb']

    # Save videos and insert their placeholders.
    vid_result = dealVideo(lines, source_url, TARGET_URL)
    lines = vid_result['content']
    # URLs of the downloaded videos.
    video_list = vid_result['video_list']

    # Drop blank lines / comments, then flatten the list into one string.
    body = "".join(cleanContent(lines))
    if DEBUG:
        print(body)
    # Kept unencoded: the result goes straight into json.dumps later.
    return {'thumb': thumb, "video_list": video_list, 'content': body}


# 设置单篇文章信息


def setArticleInfo(title, cid="1", typeid="1", thumb="", video="", keywords="", remark="", content="", block="0", modelid=MODEL_ID, author="", source="中国军网-解放军报", addtime="", updatetime=None, publisher='杜成龙', **params):
    '''
    Collect the article attributes into a dict and serialize it to JSON.\n
    @param cid : column id\n
    @param typeid : type id\n
    @param block : recommendation slot\n
    @param modelid : model id\n
    @param author : author name\n
    @param source : article source\n
    @param title : title\n
    @param content : article body\n
    @param keywords : keywords\n
    @param remark : summary\n
    @param {url} thumb : thumbnail\n
    @param video : list of video resources contained in the article\n
    @param {timestamp} addtime : creation time\n
    @param {timestamp} updatetime : modification time; defaults to "now"\n
    @param publisher : publisher name\n
    @param {dict} **params : any additional fields, merged into the result\n
    @return {json} result : final result serialized as JSON\n
    '''
    # TODO set focus image
    # BUGFIX: the old default str(int(time.time())) was evaluated once at
    # import time, so every article in a run shared one stale timestamp.
    # Compute a fresh timestamp per call instead (None acts as sentinel).
    if updatetime is None:
        updatetime = str(int(time.time()))
    result = {
        "cid": cid,
        "typeid": typeid,
        "block": block,
        "modelid": modelid,
        "title": title,
        "keywords": keywords,
        "remark": remark,
        "thumb": thumb,
        "author": author,
        "source": source,
        "addtime": addtime,
        "video": video,
        "updatetime": updatetime,
        "content": content,
        "publisher": publisher,
        **params,
    }
    # Serialize without escaping non-ASCII characters (keep Chinese readable).
    return json.dumps(result, ensure_ascii=False)


def saveArticle(article_url="", cid="1", headers="", **params):
    '''
    Scrape and save a single article; to target a different source site,
    only this function needs to change.\n
    @param article_url : URL of the article page\n
    @param cid : target column id\n
    @param headers : HTTP headers for the request\n
    @param params : extra fields forwarded verbatim to setArticleInfo\n
    @return serialized article JSON, or None on failure / duplicate\n
    '''
    if DEBUG:
        print("当前处理:"+article_url)
    try:
        html = getHtml(article_url, headers)
        # Random 0-5s pause so the crawl doesn't hammer the server.
        time.sleep(random.random()*5)
    except Exception:
        print("html获取失败")
        return
    # Raw title, with surrounding whitespace removed.
    try:
        title = html.xpath(
            '//div[@class="article-header"]/h1/text()|//div[@class="container artichle-info"]/h2/text()')[0].strip()
        # BUGFIX: dropped the old .replace(r'[^\u4e00-\u9fa50-9]','') call —
        # str.replace takes a literal string, not a regex, so it was a no-op.
        # Work around "gbk codec can't encode character '\xa0'" errors.
        title = title.replace(u'\xa0', u'').replace(u'\u378e', u'')
        if DEBUG:
            print("原始获取的标题:"+title)
        # Skip articles already downloaded in a previous run (title log).
        if not DEBUG:
            # Fixed encoding so the dedup log is platform-independent.
            with open(log_path + log_name, "a+", encoding="utf-8") as added_title:
                added_title.seek(0, 0)
                added = added_title.read()
                if title in added:
                    return
    except Exception:
        print("采集失败，URL: "+article_url)
        saveFailInfo("URL:"+article_url)
        return
    # Timestamp used as the article's add time.

    addtime = str(int(time.time()))

    # Raw article body element.
    try:
        content = html.xpath('//div[@id="article-content"]')[0]
    except Exception:
        print("正文未采集到")
        return
    print("准备采集:"+article_url)
    print(title)
    # Serialize the element to utf-8 bytes, then decode for cleaning.
    source_content = etree.tostring(content, encoding="utf-8")
    # Fully cleaned content (images, videos, comments, blank lines handled).
    content_clean_result = setContent(title, source_content.decode(
        "utf-8"), article_url=article_url, target_url=TARGET_URL)
    true_content = content_clean_result['content']
    # Thumbnail.
    thumb = content_clean_result['thumb']
    # Video URLs.
    video_list = content_clean_result['video_list']
    # Summary.
    remark = setSummary(true_content)
    # Keywords.
    keywords = setKeywords(true_content, separator=KEYWORD_SEPARATOR)

    # BUGFIX: forward the extra keyword arguments directly (**params); the
    # original passed params=more_info, which nested them under a literal
    # "params" key in the output JSON instead of reaching setArticleInfo's
    # own parameters (e.g. publisher).
    article_data = setArticleInfo(cid=cid,
                                  title=title, content=true_content, thumb=thumb, video=video_list, remark=remark, keywords=keywords, addtime=addtime, **params)
    # Record the title so reruns can detect duplicates.
    with open(log_path+log_name, 'a+', encoding="utf-8") as target:
        try:
            target.write(title+"||")
        except UnicodeEncodeError as e:
            print(e)
    return article_data


def articleListToJson(articles, datas, headers, cid='1'):
    '''
    Convert every article of one category to JSON and stash it in datas,
    keyed by the running article index.
    '''
    global article_count
    for each_url in articles:
        article_json = saveArticle(each_url, cid=cid,
                                   headers=headers, publisher='杜成龙')
        if article_json:
            datas[str(article_count)] = article_json
            article_count += 1
    print("采集{}篇".format(article_count))
    return


# ******************************** main program starts ********************************

# HTTP headers used for every page and asset request.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"
}


# Base URL of the site being scraped (used to absolutize relative hrefs).
url = "http://www.81.cn/"
print("资源下载目录:{}".format(download_path))
print("当前关键字分隔符为‘{}’".format(KEYWORD_SEPARATOR))
# Ensure the asset and log directories exist before any article is processed.
if not (os.path.exists(download_asset_path)):
    os.makedirs(download_asset_path)
if not (os.path.exists(log_path)):
    os.makedirs(log_path)
# Scraping starts here.
if __name__ == "__main__":
    filename = 'jw_datas.json'
    datas = {}  # final collected data, keyed by running article index
    article_count = 0  # number of articles scraped in this run
    # Single-article mode: put URLs into single_article to scrape only those.
    single_article = []
    if single_article:
        print('单篇文章采集')
        articleListToJson(single_article, datas, headers, cid='3')
    else:
        articles = []
        cate_url = "http://www.81.cn/"
        html = getHtml(cate_url, headers)
        comment_article = html.xpath(
            '//div[@class="tab-body"]/ul/descendant::li/div/a/@href')
        counts = len(comment_article)

        # Absolutize relative hrefs against the site base URL.
        for each in range(counts):
            if "http" in comment_article[each]:
                articles.append(comment_article[each])
            else:
                articles.append(url+comment_article[each])
        # articles.append("http://www.81.cn/syjdt/2021-05/07/content_10033113.htm")
        articleListToJson(articles, datas, headers, cid='3')
        # END
    # ******************************* save **************************************
    datas = json.dumps(datas, ensure_ascii=False)
    # Length here is never 0: dumps of an empty dict is the 2-char string "{}".
    if len(datas) > 2:

        with open(download_path+filename, 'wb+') as target:
            target.write(datas.encode('utf-8'))
        print("成功保存{}篇文章".format(article_count))
        # Keep a running total of scraped articles across runs.
        if DEBUG:
            print("DEBUG模式不统计数据")
        else:
            # NOTE(review): assumes count.txt already exists — a first run
            # would raise FileNotFoundError here; confirm it is pre-created.
            with open(log_path+'count.txt', 'r') as target:
                saved=target.read()
            with open(log_path+'count.txt', 'w') as target:
                target.write(str(int(article_count)+int(saved)))
    else:
        print("没有需要更新的文章")

    # ******************************* save END ***********************************
    # Single-article smoke test (kept for reference):
    # article_url = "http://www.81.cn/syjdt/2021-05/07/content_10033113.htm"
    # r = requests.get(article_url)
    # html = getHtml(article_url)
    # content = html.xpath('//div[@id="article-content"]')[0]

    # content = etree.tostring(content, encoding="utf-8")
    # content = content.decode('utf-8').split('\n')
    # content = saveArticle(article_url=article_url,
    #                       headers=headers, publisher='杜成龙')

    # with open(download_path+'1.html', 'a+') as target:
    #     target.write(str(content))
    # test = saveArticle(article_url=article_url,
    #                    headers=headers, publisher='杜成龙')
    # with open(download_path+'datas.json', 'wb+') as target:
    #     target.write(test.encode('utf-8'))
# print(test)
