import os
import sys
import re
import requests
import json
import random
import time
from lxml import etree
from fake_useragent import UserAgent

# Add the current working directory to sys.path so that the sibling
# module `pub_func` can be imported when this script is run from
# anywhere (e.g. via an absolute path or a scheduler).
cwd = os.getcwd()
sys.path.append(cwd)

from pub_func import folder_exist, file_write, file_readlines, download

# Base endpoint of the WeChat Official Account article API (kept for reference).
base_url = 'https://mp.weixin.qq.com/cgi-bin/appmsg'
# Used to rotate a random User-Agent per request inside the main loop.
ua = UserAgent()
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    # NOTE(review): the cookie below is a hard-coded logged-in session and
    # must be replaced with your own before running; avoid committing live
    # credentials to source control.
    'cookie': 'pgv_pvi=2781868032; pgv_si=s4599799808; cert=rXo7W6d2I1SKuWSZPnVAOIp68hEd0FLn; uuid=56e70559b55b7d57cba0e899e82341c6; ticket=eb8ea09045d5d10cc56018ba08a8c9314c88f923; ticket_id=gh_9fcc355cd60c; noticeLoginFlag=1; data_bizuin=3577584030; data_ticket=JY4r8NJDuaUMvUJJ/plokJk7fljQWr0E/wUn7vB1sxHGx4KXrb3brwnDwWDKhkMQ; ua_id=584ggvmAz9oEESXfAAAAAPzBQTirE-DDsNPlpBmJDig=; xid=2033b50a8b0bb5a8f4d8d022e3020b16; openid2ticket_onpk91aWV7SWw4aUp62Sy65ap3jc=U2S7zR4gn3NGosUbBCX/tgt6xLMwQyg6yKkcWdZE5kM=; mm_lang=zh_CN; slave_user=gh_9fcc355cd60c; slave_sid=T0lnNnFYYThWX2w2OGRsZjdRMTVaR0RrSmpMX0Jja2VQTDFjNDlPSVluYmNUamF0bTNVbWNzMHZQQ2RHV2tsankyNFcyV3FkeEdMYmlfVkY0RXNzUlVjUTFidXp5YjU1b2E3cWphUTU2eGo4YlMxT1ROUm1adnpKV0NnNEVPdnBQWGVVcWJTbUd4bTdwQVZt; bizuin=3538830699; rewardsn=; wxtokenkey=777',
    # NOTE(review): the token in the referer URL must also be replaced to
    # match the session in the cookie above.
    'referer': 'https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit_v2&action=edit&isNew=1&type=10&token=1268557685&lang=zh_CN'
}
# Output directory for downloaded images and their caption .txt files.
dir_path = './article/gongzhonghao'
# Input CSV of previously collected articles: one "time,title,url" row each.
file_path = './article/article.csv'
# 1-based row index of the article currently being processed.
article_row = 0

def _img_extension(img_url):
    """Return the image file extension from the URL's wx_fmt query
    parameter, falling back to 'png' when the parameter is absent."""
    match = re.search(r'wx_fmt=([a-zA-Z]+)', img_url)
    return match.group(1) if match else 'png'


def _save_image_with_caption(img_url, img_desc, base_name):
    """Download one image into dir_path as <base_name>.<ext> and write its
    caption text to a sibling <base_name>.txt file."""
    print(img_url, img_desc, base_name)
    ext = _img_extension(img_url)
    # Deliberately a local name: the module-level `file_path` is the input
    # CSV path and must not be clobbered (the original code shadowed it).
    img_path = os.path.join(dir_path, base_name + '.' + ext)
    download(img_url, img_path)
    file_write(dir_path + '/' + base_name + '.txt', img_desc)


list_article_info = file_readlines(file_path)
# enumerate(..., start=1) reproduces the original 1-based row counter.
for article_row, article_info in enumerate(list_article_info, start=1):
    # Each CSV row is "time,title,url"; split once instead of three times.
    fields = article_info.split(',')
    article_time = fields[0]
    article_title = fields[1]
    article_url = fields[2].strip()

    # Rotate the User-Agent on every request to reduce bot detection.
    headers['User-Agent'] = ua.random
    response = requests.get(url=article_url, headers=headers)

    html = etree.HTML(response.text)
    # Images and their captions both live in <p> tags under id="js_content".
    list_img_info = html.xpath('//*[@id="js_content"]//p')
    i = 1  # per-article image counter; advanced only when a pair is saved

    for img_info in list_img_info:
        img_desc = img_info.xpath('span/text()')
        img_url = img_info.xpath('img/@data-src')
        if not img_desc:
            continue
        base_name = article_time + '_' + str(article_row) + '_' + str(i)
        if img_url:
            # Image and caption share the same <p>.
            _save_image_with_caption(img_url[0], img_desc[0], base_name)
            i += 1
        elif '▲' in img_desc[0]:
            # Caption-only paragraph marked with '▲': the matching image
            # sits in the immediately preceding <p>.
            prev_img_url = img_info.xpath("preceding-sibling::p[1]/img/@data-src")
            if prev_img_url:
                _save_image_with_caption(prev_img_url[0], img_desc[0], base_name)
                i += 1
    # Throttle requests to stay under WeChat's anti-crawling radar.
    time.sleep(2)