# -*- coding: UTF-8 -*-
import imghdr
import os
import platform
import re
import shutil
import subprocess
import sys
import urllib
from urllib import request
from urllib.request import urlretrieve

import requests
from bs4 import BeautifulSoup
import PIL.Image as im

''' 
单文件测试代码：python3 wx.py [target_url] [saveDir] [html_txt]
[target_url]-爬取链接
[saveDir]-保存目录
[html_txt]-html文本
示例：python3 wx.py https://mp.weixin.qq.com/s/TJREhU29ohMRR5hxPGc8PQ /Users/wucheng/Downloads html.txt

pyinstaller生成可执行程序：sudo pyinstaller -F wx.py

pip3安装依赖：beautifulsoup4 pyinstaller requests pillow
'''

# 1. Read the command-line arguments (usage is documented in the notes
#    at the top of the file). No validation is done: a missing argument
#    raises IndexError here.
target = sys.argv[1]    # URL that was scraped (selects the site branch below)
saveDir = sys.argv[2]   # directory under which the output folder is created
html_txt = sys.argv[3]  # path of the saved HTML text file to parse


def report(a, b, c):
	"""urlretrieve reporthook: print download progress as a percentage.

	a -- number of blocks transferred so far
	b -- block size in bytes
	c -- total file size in bytes; urlretrieve passes a value <= 0
	     when the server does not report a length
	"""
	# Bug fix: guard against unknown/zero total size, which previously
	# raised ZeroDivisionError (or printed a negative percentage).
	if c <= 0:
		return
	per = 100.0 * a * b / c
	if per > 100:
		per = 100
	print('\r %.2f%%' % per, end="")


# Install a module-wide URL opener whose requests carry a desktop
# browser User-Agent, so image hosts don't reject us as a bot.
_ua_header = ('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE')
opener = urllib.request.build_opener()
opener.addheaders = [_ua_header]
urllib.request.install_opener(opener)

# print("视频下载中")
# request.urlretrieve(url=video_url,filename='wx.mp4',reporthook=report,data=None)
# print()


# Browser-like request headers for XHR-style fetches.
# NOTE(review): this dict is built but never referenced anywhere in this
# script (the `requests` import is likewise unused); presumably left over
# from an earlier version — confirm before removing.
headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
            "Connection": "keep-alive",
            "Host": "localhost",
            "DNT": "1",
            "sec-ch-ua": '"Chromium";v="88", "Google Chrome";v="88", ";Not A Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "Sec-Fetch-Dest": 'empty',
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "X-Requested-With": "XMLHttpRequest"
        }


def getText(parent):
	"""Extract the visible text of a bs4 node, keeping line breaks.

	<br> / <br/> tags become CRLF before re-parsing, and runs of
	space-like characters are collapsed into CRLF as well; the text of
	every descendant node is then joined with CRLF.
	"""
	p = str(parent)  # serialize the node so tags can be text-replaced
	p = p.replace('<br/>', '\r\n')
	p = p.replace('<br>', '\r\n')
	# Collapse runs of space characters into line breaks.
	p = re.sub(' +| +', '\r\n', p)
	# Re-parse the cleaned markup and collect every text node.
	# Fix: `string=` replaces the keyword `text=`, deprecated since
	# bs4 4.4; `recursive=True` is the default and was redundant.
	parent = BeautifulSoup(p, 'html.parser')
	return '\r\n'.join(parent.find_all(string=True))

# 用BeautifulSoup解析数据  python3 必须传入参数二'html.parser' 得到一个对象，接下来获取对象的相关属性
# html=BeautifulSoup(req.text,'html.parser')

# Read the saved HTML text file; open() raises if it does not exist,
# and the with-block closes it afterwards.
with open(html_txt, 'r', encoding='utf-8') as html_fh:
	# readlines() returns all remaining lines as a list of strings.
	html_lines = html_fh.readlines()

# NOTE: the list of lines is stringified (Python list repr) before
# parsing — the title clean-up in the WeChat branch below relies on
# the repr artifacts this introduces.
html = BeautifulSoup(str(html_lines), 'html.parser')



##########
# 平台选择
##########

# ------ WeChat (mp.weixin.qq.com) articles
if 'weixin.qq.com' in target:
	# Article title: the raw text still carries list-repr noise from
	# str(readlines()) above, which is stripped out here.
	raw_title = getText(html.select("h1.rich_media_title")[0].string)
	title = raw_title.replace("\\n\',", '').replace('\'', '').strip()
	print(title)

	# .string is None on a tag with several children, so extract the
	# text of each direct child of the article body instead.
	body_nodes = html.select("div.rich_media_content > *")
	content = "".join(getText(node) for node in body_nodes)
	# WeChat lazy-loads images: the real URL lives in data-src.
	images = [img.get('data-src')
	          for node in body_nodes
	          for img in node.find_all('img')]

# ------ Toutiao (toutiao.com) pages
if 'toutiao.com' in target:

	if html.select("div[class='weitoutiao-html']"):  # micro-post ("weitoutiao")
		# .string is None on multi-child tags, so extract per node.
		content = "".join(getText(node)
		                  for node in html.select("div[class='weitoutiao-html']"))
		# Collect the image URLs.
		images = [img.get('src') for img in html.select('.weitoutiao-img')]

		# Micro-posts carry no headline; use the first 11 characters
		# of the text as the title.
		title = content[0:11]

	elif html.select("div[class='article-content']"):  # regular article
		title = html.select("div.article-content > h1")[0].string

		article_nodes = html.select("div.article-content > *")
		content = "".join(getText(node) for node in article_nodes)
		# Collect the image URLs.
		images = [img.get('src')
		          for node in article_nodes
		          for img in node.find_all('img')]



# print(title)
# print(content)
# print(images)




# Create a fresh output folder named after the article title; if it
# already exists from a previous run, wipe it first.
# Bug fix: the original looped over listdir(), computed
# os.path.isfile() but DISCARDED the result, and then called
# os.remove() unconditionally — any subdirectory inside baseDir made
# it crash (and os.rmdir would have failed on a non-empty dir anyway).
# shutil.rmtree handles nested content correctly.
baseDir = saveDir + os.sep + title
if os.path.exists(baseDir):
	shutil.rmtree(baseDir)

os.mkdir(baseDir)
# Download every collected image and normalize it to an RGB JPEG.
# Bug fixes vs the original:
#  * images.index(img) returned the FIRST occurrence, so duplicate
#    URLs overwrote the same file — enumerate() gives stable indices.
#  * `imghdr.what(...) == 'png' or 'jpeg'` was a precedence bug: the
#    expression is always truthy, so every image was converted to RGB
#    regardless. That conversion is in fact needed to save any
#    RGBA/palette image under a .jpg name, so do it unconditionally
#    and drop the dead branch (imghdr is deprecated anyway).
#  * img.get(...) can return None when the attribute is missing,
#    which previously crashed on .startswith — skip those entries.
for idx, img in enumerate(images):
	if not img:
		continue
	# Protocol-relative URLs ("//host/...") need a scheme prepended.
	if img.startswith('//'):
		imgUrl = "https:" + img
	else:
		imgUrl = img
	dest = baseDir + os.sep + str(idx) + ".jpg"
	urlretrieve(imgUrl, dest)
	# Convert to RGB so the re-save as JPEG always succeeds.
	picture = im.open(dest)
	picture.convert("RGB").save(dest)
# Save the article text next to the images.
# Bug fix: the encoding was the platform/locale default, which can
# raise UnicodeEncodeError for Chinese text on Windows — write UTF-8
# explicitly. The with-block creates/truncates the file and closes it
# even if write() raises.
with open(baseDir + os.sep + '文章.txt', 'w', encoding='utf-8') as out:
	out.write(content)





