#encoding=utf-8
'''
注：nltk是python的独立nlp平台模块，可下载相应的工具模块以纯python环境使用,如下：
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
!!!!!!!!!!!!!!!!!!!!!!nltk.download('wordnet')!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  Searched in:
    - 'C:\\Users\\Administrator/nltk_data'
    - 'C:\\D\\tools\\Python\\Python38\\nltk_data'
    - 'C:\\D\\tools\\Python\\Python38\\share\\nltk_data'
    - 'C:\\D\\tools\\Python\\Python38\\lib\\nltk_data'
    - 'C:\\Users\\Administrator\\AppData\\Roaming\\nltk_data'
    - 'C:\\nltk_data'
    - 'D:\\nltk_data'
    - 'E:\\nltk_data'

此demo只为初步体验语法解析结构图
Tree.fromstring(nlp_parse).draw()
本代码实现：
（1）处理txt文件中的内容

1.安装python模块：
    pip install stanfordcorenlp
    pip install nltk
2.下载java包 stanford-corenlp-x.x.x ，供StanfordCoreNLP调用
  nlp = StanfordCoreNLP(r'C:/D/tools/java/jdk1.8.0_121/lib/stanford-corenlp-4.2.0',lang='en')
'''

# Stemming (Porter algorithm).
from nltk.stem.porter import PorterStemmer

ps = PorterStemmer()

def psstem(word):
  """Return the Porter stem of *word*."""
  return ps.stem(word)
# Lemmatization (WordNet; requires nltk.download('wordnet')).
from nltk.stem.wordnet import WordNetLemmatizer

wnl = WordNetLemmatizer()

def wnllem(word):
  """Return the WordNet lemma of *word*."""
  return wnl.lemmatize(word)
# English stopwords (requires nltk.download('stopwords')).
from nltk.corpus import stopwords

stopwordList = stopwords.words('english')

def getstopwords():
  """Return the English stopword list."""
  return stopwords.words('english')
# Sentence splitting via nltk (requires nltk.download('punkt')).
from nltk.tokenize import sent_tokenize

def getsentens(mysentences):
  """Split a text into a list of sentences."""
  return sent_tokenize(mysentences)
# Word tokenization via nltk.
from nltk.tokenize import word_tokenize

def getwords(mysentence):
  """Tokenize one sentence into a list of words."""
  return word_tokenize(mysentence)


import sys,requests,json
import html2text
from bs4 import BeautifulSoup
from urllib.parse import unquote

def request_get(_url:str,_header:dict,timeout:float=30.0):
    """Send a GET request and return the response body text.

    On a non-200 status or a request error this returns a human-readable
    error-message string instead of raising, because callers feed the
    return value straight into BeautifulSoup / re.search.

    :param _url: target url
    :param _header: headers dict passed through to requests
    :param timeout: seconds before giving up; previously no timeout was
        set, so a stalled server could hang the script forever
    """
    try:
        # 发送 get 请求
        response = requests.get(_url,headers=_header,timeout=timeout)
        # 检查响应状态码
        if response.status_code == 200:
            return response.text
        else:
            return f"请求失败，状态码: {response.status_code}"
    except requests.RequestException as e:
        return f"请求发生错误: {e}"

# Build a markdown document (title, description, image table, article body)
# from a toutiao article page; image urls are also appended to ./content.txt.
def processToutiaoArticlePic(url:str):
  putCommonHeaders('toutiao')
  HEADERS_COMMON['path'] = url.replace('https://www.toutiao.com','')
  page_html = request_get(url,HEADERS_COMMON)
  soup = BeautifulSoup(page_html,'html.parser')
  body_md = ''
  containers = soup.find_all('div',class_="article-content")
  if containers:
    body_md = html2text.html2text(str(containers[0]))
  # article metadata (headline, description, image list) lives in the
  # embedded ld+json script tag
  meta_scripts = soup.find_all('script',type='application/ld+json')
  if not meta_scripts:
    return
  meta = json.loads(meta_scripts[0].string)
  headline = deleteIllegalChar(meta['headline'])
  description = meta['description']
  markdown_txt = f'# {headline}\n\n> {description}\n\n|no|image|\n|---|---|\n'
  url_lines = ''
  for no, image_url in enumerate(meta['image'], start=1):
    # markdown row: ![Alt](url)
    markdown_txt += f'|{no}|![^_`]({image_url})|\n'
    url_lines += f'{image_url}\n'
  writeFileForContent('./content.txt',url_lines,model='a')
  writeFileForContent(f'{headline}.md',f'{markdown_txt}\n{body_md}')

# Build a markdown image table from a toutiao short-post ("weitoutiao") page;
# image urls are also appended to ./content.txt.
def processToutiaoWeitoutiaoPic(url:str):
  putCommonHeaders('toutiao')
  page_html = request_get(url,HEADERS_COMMON)
  soup = BeautifulSoup(page_html,'html.parser')
  render_data = soup.find_all('script',id='RENDER_DATA')
  if not render_data:
    # dump the raw page for inspection, then stop the whole script
    print(f'fetch target element error:{url}\nsee the html in local toutiao.html!')
    with open('./toutiao.html','w',encoding='utf-8') as dump:
      dump.write(page_html)
    exit()
  # RENDER_DATA is url-encoded JSON
  data = json.loads(unquote(render_data[0].string))
  headline = deleteIllegalChar(data['data']['ugcShare']['share_title'])
  markdown_txt = '|no|image|\n|---|---|\n'
  url_lines = ''
  for no, image_path in enumerate(data['data']['ugcImages'], start=1):
    # image entries are protocol-relative; markdown row: ![Alt](url)
    markdown_txt += f'|{no}|![^_`](https:{image_path})|\n'
    url_lines += f'https:{image_path}\n'
  writeFileForContent('./content.txt',url_lines,model='a')
  writeFileForContent(f'{headline[:5]}.md',markdown_txt)

# Baseline browser-impersonation headers shared by every request.
# putCommonHeaders() overlays per-domain entries (cookie, authority, ...)
# from headers.json onto this dict, and processToutiaoArticlePic() sets
# a 'path' entry — so this module-level dict is mutated at runtime.
HEADERS_COMMON = {
  'method':'GET'
  ,'scheme':'https'
  ,'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
  ,'accept-encoding':'gzip, deflate' # 'gzip, deflate, br'
  ,'accept-language':'zh-CN,zh;q=0.9'
  ,'cache-control':'max-age=0'
  ,'sec-ch-ua':'"Not_A Brand";v="99", "Google Chrome";v="109", "Chromium";v="109"'
  ,'sec-ch-ua-mobile':'?0'
  ,'sec-ch-ua-platform':'"Windows"'
  ,'sec-fetch-dest':'document' 
  ,'sec-fetch-mode':'navigate'
  ,'sec-fetch-site':'same-origin' # cross-site
  ,'sec-fetch-user':'?1'
  ,'upgrade-insecure-requests':'1'
  ,'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
}
import re

# Matches the inline JSON that bilibili embeds as window.__playinfo__.
# bug fix: this was a non-raw string, so '\.' relied on Python keeping
# unknown escape sequences verbatim — that emits a SyntaxWarning on
# modern Python (deprecated since 3.6); a raw string has the same value.
bilibili_info_pattern = r'<script>window\.__playinfo__=(.*?)</script>'
def processBilibili(_url:str):
  """Dump a bilibili page's embedded window.__playinfo__ JSON to ./bilibili.json.

  Work in progress: always terminates the interpreter via exit(), either
  after writing the dump or when the pattern is not found.
  """
  putCommonHeaders('bilibili')
  html = request_get(_url,HEADERS_COMMON)
  bilibili_info = re.search(bilibili_info_pattern,html)
  if bilibili_info is None:
    print('bilibili_info == None\n')
    exit()
  with open('./bilibili.json','w',encoding='utf-8') as w:
    w.write(bilibili_info[1])
  exit()
  # bug fix: removed the dead code that used to follow this unconditional
  # exit() — an unreachable, stale copy of the toutiao weitoutiao flow.

# Read the per-domain request headers (cookie etc.) from ./headers.json.
# headers.json sample:
# {
#     "zhihu":{
#         "cookie":""
#         ,"authority":"www.zhihu.com"
#     }
#     ,"toutiao":{}
#     ,"bilibili":{}
# }
def getHeadersDict(domain:str):
  """Return the header dict for *domain* from ./headers.json, or None.

  A present cookie value is re-encoded utf-8 -> latin-1, presumably
  because header values must be latin-1-encodable for requests while
  cookies pasted from a browser may contain other characters — TODO confirm.
  """
  with open('./headers.json','r',encoding='utf-8') as c:
    headers_json = json.loads(c.read())
  # direct lookup instead of scanning all items for a matching key
  v = headers_json.get(domain)
  if v is None:
    return None
  # bug fix: a domain entry without a "cookie" key (e.g. the sample's
  # empty "toutiao":{}) used to raise KeyError here
  if 'cookie' in v:
    v['cookie'] = v['cookie'].encode('utf-8').decode('latin-1')
  return v

def putCommonHeaders(domain:str):
  """Overlay the domain-specific headers from headers.json onto HEADERS_COMMON."""
  extra = getHeadersDict(domain)
  if extra:
    HEADERS_COMMON.update(extra)

# 知乎 — url-fragment patterns extracting zhihu article / question / answer ids.
# bug fix: these were non-raw strings, so '\d' relied on Python keeping
# unknown escapes verbatim — a SyntaxWarning on modern Python; raw strings
# have the identical value.
zhihu_p_url_pattern = r'p/(\d+)'
zhihu_qustion_url_pattern = r'question/(\d+)'
zhihu_answer_url_pattern = r'answer/(\d+)'
def processZhihuPArticle(_url:str):
  """Collect the images of a zhihu column article into a markdown table.

  Appends every image url to ./content.txt and writes the table to
  '<title[:5]>.md'.
  """
  putCommonHeaders('zhihu')
  page_html = request_get(_url,HEADERS_COMMON)
  soup = BeautifulSoup(page_html,'html.parser')
  title = deleteIllegalChar(soup.find('title').string)
  markdown_txt = '|no|image|\n|---|---|\n'
  url_lines = ''
  no = 1
  for figure in soup.find_all('figure', attrs={'data-size':'normal'}):
    for img in figure.find_all('img'):
      # full-size image url sits in the data-original attribute
      src = img['data-original']
      markdown_txt += f"|{no}|![^_`]({src})|\n"
      url_lines += f"{src}\n"
      no += 1
  writeFileForContent('./content.txt',url_lines,model='a')
  writeFileForContent(f'{title[:5]}.md',markdown_txt)

def processZhihuQeustion(_url:str):
  """Extract answer images from a zhihu question/answer page.

  Appends the image urls to ./content.txt and writes a markdown image
  table to '<title[:5]>.md'.
  """
  # ids parsed from the url; None when the url doesn't contain them
  question_no_p = re.search(zhihu_qustion_url_pattern,_url)
  question_no = question_no_p[1] if question_no_p else None
  answer_no_p = re.search(zhihu_answer_url_pattern,_url)
  answer_no = answer_no_p[1] if answer_no_p else None
  putCommonHeaders('zhihu')
  html = request_get(_url,HEADERS_COMMON)
  bsp = BeautifulSoup(html,'html.parser')
  titles = bsp.find_all('title')
  if len(titles)>0:
    title = titles[0].string
  else:
    # bug fix: was `_url.split('/')[:-1]`, which yields a *list* and broke
    # the `title[:5]` filename slice below; take the last path segment
    title = _url.split('/')[-1]
  markdown_txt = '|no|image|\n|---|---|\n'
  txt = ''
  info_json_html = bsp.find_all('script',id='js-initialData')
  # bug fix: also guard on the parsed ids — a url without 'question/' or
  # 'answer/' previously hit an unbound-variable NameError below
  if len(info_json_html)>0 and question_no is not None and answer_no is not None:
    info_json = json.loads(info_json_html[0].string)
    content = info_json['initialState']['entities']['answers'][answer_no]['content']
    title = deleteIllegalChar(info_json['initialState']['entities']['questions'][question_no]['title'])
    info_html_bsp = BeautifulSoup(content,'html.parser')
    no = 1
    for img in info_html_bsp.find_all('img',class_='lazy'):
      if not img.has_attr('data-original'):
        continue
      markdown_txt += f"|{str(no)}|![^_`]({img['data-original']})|\n"
      txt += f"{img['data-original']}\n"
      no += 1
  writeFileForContent('./content.txt',txt,model='a')
  writeFileForContent(f'{title[:5]}.md',markdown_txt)

def writeFileForContent(outputFullName:str,content:str,model='w',encoding='utf-8'):
  """Write *content* to *outputFullName*.

  `model` is the open() mode ('w' overwrite, 'a' append) — the
  misspelling of "mode" is kept because callers pass it by keyword.
  """
  with open(outputFullName,model,encoding=encoding) as out_file:
    out_file.write(content)

# Zhihu entry point: dispatch on the url shape.
def mainProcessZhihu(_url:str):
  """Route a zhihu url to the column-article or question/answer handler."""
  if '/p/' in _url:
    processZhihuPArticle(_url)
    return
  if '/answer/' in _url:
    processZhihuQeustion(_url)
# Toutiao entry point: dispatch on the url shape.
def mainProcessToutiao(_url:str):
  """Route a toutiao url to the article or short-post handler."""
  if '/article/' in _url:
    processToutiaoArticlePic(_url)
    return
  if '/w/' in _url:
    processToutiaoWeitoutiaoPic(_url)

# Strip characters that Windows forbids in filenames: / \ : * ? " < > |
def deleteIllegalChar(_name:str):
  """Return *_name* with filename-illegal characters removed."""
  return re.sub(r'[/\\:*?"<>|]', "", _name)

# Convert an HTML file to a markdown file via html2text.
def convertHtmlToMarkdown(inputContentFilePathName:str,outputFllePathName:str):
  """Read HTML from the input path and write its markdown rendering to the output path."""
  with open(inputContentFilePathName,'r',encoding='utf-8') as src:
    html_content = src.read()
  writeFileForContent(outputFllePathName,html2text.html2text(html_content))
  

# CLI entry: `python <script> <web> <url>` where <web> is 'zhihu' or 'toutiao'.
if __name__ == '__main__':
  _url = ''
  if len(sys.argv) == 3 :
    _web = sys.argv[1]
    _url = sys.argv[2]
    if 'zhihu' == _web:
      mainProcessZhihu(_url)
    elif 'toutiao' == _web:
      mainProcessToutiao(_url)
  # zhihu
  # mainProcessZhihu(_url)
  # toutiao
  # mainProcessToutiao(_url)
  # processBilibili(_url)
  # convertHtmlToMarkdown('./content.txt','./content.md')
  exit()
  # NOTE(review): everything below is unreachable because of the exit()
  # above — it is the old nltk stemming demo kept for reference.
  sentence = ''
  try:
    input_file = open('content.txt','r')
    sentence = input_file.read()
  except Exception as es:
    print('exception:::::: %s' % str(es))
  finally:
    pass
  targetworddict = {}
  sentenceList = getsentens(sentence)#split into sentences
  for sen in sentenceList:
    wordList = getwords(sen)#tokenize
    for word in wordList:
      if word not in stopwordList:#drop stopwords
        targetworddict[word] = psstem(word)
  for k,v in targetworddict.items():
    print("%s---> %s " % (k,v))