import base64
import json
import os.path
import re
import time

import hashlib
import requests


# Browser-impersonating request headers sent with every requests.get call.
# NOTE(review): the cookie and referer were captured from one specific Taobao
# auction session and are almost certainly stale — refresh them if the target
# site starts rejecting requests.
headers = {
  'accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
  'accept-encoding': 'gzip, deflate, br',
  'accept-language': 'zh-CN,zh;q=0.9',
  'cookie': '_tb_token_=berT80V49uJ9PFEJKGPI; cna=IhV+FpiDqRsCAXE54OSIgfFP; v=0; t=bb1c685b877ff64669f99c9dade7042c; cookie2=1e5103120f9886062722c86a5fad8c64; uc1=cookie14=UoTbm8P7LhIRQg%3D%3D; isg=BJWVw-e2ZCOuRUDfqsuI4YF0pJFFPHuu_ffxbBc6UYxbbrVg3-JZdKMoODL97mFc; l=dBMDiW9Rqv8wgDSFBOCiVZ9JHt_OSIRAguWfypeMi_5Zl681GgQOkUvZ8FJ6VjWftBTB4tm2-g29-etki6jgwbd6TCNQOxDc.',
  'referer': 'https://item-paimai.taobao.com/pmp_item/609160317276.htm?s=pmp_detail&spm=a213x.7340941.2001.61.1aec2cb6RKlKoy',
  'sec-fetch-mode': 'cors',
  "sec-fetch-site": 'same-origin',
  'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
  'x-requested-with': 'XMLHttpRequest'
}


def GetMd5(s):
  """Return the hex MD5 digest of the UTF-8 encoding of *s* (used as a cache key)."""
  digest = hashlib.md5()
  digest.update(s.encode('utf-8'))
  return digest.hexdigest()


def RTxt(path):
  """Read the whole UTF-8 text file at *path* and return its contents."""
  with open(path, 'r', encoding='utf8') as fp:
    data = fp.read()
  return data


def WTxt(path, content):
  """Write *content* to *path* as UTF-8, overwriting any existing file.

  Any '?' in the path is replaced with the full-width '？' so that titles
  containing query-style characters still produce a valid filename.
  """
  # str.replace is already a no-op when '?' is absent, so the old
  # "if '?' in path" guard was redundant.
  path = path.replace('?', '？')
  with open(path, 'w', encoding='utf8') as f:
    f.write(content)


def ATxt(path, content):
  """Append *content* to the UTF-8 text file at *path*, creating it if absent."""
  with open(path, mode='a', encoding='utf8') as out:
    out.write(content)


def Find(html, reList):
  """Apply the regexes in *reList* one after another, DOTALL mode.

  The first pattern runs against *html*; each subsequent pattern runs
  against the first match of the previous step.  Returns the matches of
  the last pattern, [] as soon as any step matches nothing, and [html]
  when *reList* is empty.
  """
  if not reList:
    return [html]
  matches = re.findall(reList[0], html, re.S)
  if not matches:
    return []
  if len(reList) == 1:
    return matches
  # Drill into the first match with the remaining patterns.
  return Find(matches[0], reList[1:])


def Replace(html, *args):
  """Apply each (old, new) pair in *args* to *html* in order; return the result."""
  result = html
  for old, new in args:
    result = result.replace(old, new)
  return result


def GetHtml(url, www, catalogueUrl):
  """Fetch *url* as text, with an on-disk cache under ./temp.

  url          -- absolute ('http...'), page-relative ('./x' or 'x'),
                  or site-relative ('/a/b') URL.
  www          -- scheme+host root, used to resolve site-relative URLs.
  catalogueUrl -- catalogue page base, used to resolve page-relative URLs.
  Returns the response body; a cached copy is returned without any network
  access when one exists.
  """
  if not os.path.exists('temp'):
    os.mkdir('temp')
  # Resolve relative URL forms against the catalogue page or the site root.
  if url.startswith('./'):
    url = catalogueUrl + url[2:]
  else:
    if not url.startswith('http'):
      if '/' not in url:
        url = catalogueUrl + url
      else:
        url = www + url
  # print('GetHtml.url=', url, www, catalogueUrl)
  # NOTE(review): the name says base64 but this is an MD5 hex digest,
  # used purely as a filesystem-safe cache key.
  base64Url = GetMd5(url)
  tempPath = f'temp/{base64Url}'
  if os.path.exists(tempPath):
    return RTxt(tempPath)  # cache hit: skip the request entirely
  html = ''
  # NOTE(review): retries forever — there is no attempt limit, and a server
  # that returns an empty body (html == '') also loops indefinitely.
  while html == '':
    try:
      html = requests.get(url, headers=headers, timeout=(2, 3)).text
    except Exception as e:
      print('[error]GetHtml.e=', e)
      time.sleep(2.0)
  WTxt(tempPath, html)
  return html


def JudgeWordsInLine(line, words):
  """Return True if any string in *words* occurs as a substring of *line*."""
  return any(word in line for word in words)


def Format(content, ignoreWords):
  """Clean scraped chapter text into indented paragraphs.

  Blank lines and lines containing any keyword in *ignoreWords* are dropped;
  every remaining line is stripped and indented by four spaces.  A line whose
  predecessor ends with the Chinese comma '，' is treated as a wrapped
  continuation and merged onto that predecessor.
  """
  result = []
  # Iterate lines directly instead of indexing with range(len(...)).
  for line in content.split('\n'):
    if not line:
      continue
    # Drop boilerplate (ads, navigation, ...) identified by keyword.
    if any(word in line for word in ignoreWords):
      continue
    # str.strip() is equivalent to the old re.sub(r'^\s*(.*?)\s*$', r'\1', ...).
    line = line.strip()
    if result and result[-1][-1] == '，':
      # Previous line ended mid-sentence: append the continuation to it.
      result[-1] += line
    else:
      result.append(' ' * 4 + line)
  return '\n'.join(result)


class Downloader:
  """Config-driven novel scraper.

  Reads a JSON config that maps Chinese-keyed rule names to regex lists
  (catalogue extraction, chapter title/body extraction, next-page links,
  substitution rules, ignore keywords), crawls every chapter linked from
  the catalogue page, and appends cleaned text to '<name>.txt'.
  """

  def __init__(self, name, catalogueUrl, configPath, saveChapter=False):
    """Load the JSON config; self.configLoaded stays False on any failure.

    name         -- output base name ('<name>.txt', per-chapter folder).
    catalogueUrl -- URL of the catalogue (table of contents) page.
    configPath   -- path to the JSON rules file.
    saveChapter  -- when True, also write each chapter to its own file.
    """
    self.name = name
    self.saveChapter = saveChapter
    self.catalogueUrl = catalogueUrl
    # Site root ('scheme://host') derived from the catalogue URL; used to
    # resolve site-relative chapter links.
    self.www = '/'.join(self.catalogueUrl.split('/')[:3])
    self.configPath = configPath
    self.configLoaded = False
    self.config = {}
    if os.path.exists(self.configPath):
      try:
        self.config = json.loads(RTxt(self.configPath))
        self.configLoaded = True
      except Exception as e:
        print(f'加载"{self.configPath}"出现错误"{e}"，脚本终止！')
    else:
      print(f'未找到配置文件路径"{self.configPath}"，脚本终止！')

  def GetCatalogueUrls(self, catalogueUrl):
    """Return the list of chapter URLs extracted from the catalogue page(s)."""
    print(f'准备从"{catalogueUrl}"解析目录……')
    return self.GetContent(catalogueUrl, self.config['目录提取正则表达式'], self.config.get('目录下一页提取正则表达式', []))

  def GetContent(self, url, getRex, nextRex):
    """Extract matches of *getRex* from *url*, following *nextRex* pagination.

    Recurses across "next page" links; results from all pages are
    concatenated in order.  NOTE(review): recursion depth equals the number
    of pages — assumed small; verify for very long chapters.
    """
    print('准备开始解析Url：', url)
    html = GetHtml(url, self.www, self.catalogueUrl)
    result = Find(html, getRex)
    if nextRex:
      nextPageUrl = Find(html, nextRex)
      if nextPageUrl:
        result += self.GetContent(nextPageUrl[0], getRex, nextRex)
    return result

  def Run(self):
    """Crawl every chapter and append the cleaned text to '<name>.txt'."""
    # Start from a clean output file.
    if os.path.exists(f'{self.name}.txt'):
      os.remove(f'{self.name}.txt')
    # Extract the chapter URLs from the catalogue.
    catalogueUrls = self.GetCatalogueUrls(self.catalogueUrl)
    print(f'目录匹配项数量：{len(catalogueUrls)}')
    # Crawl each chapter in catalogue order.
    print('准备开始爬取章节...')
    if not os.path.exists('temp'):
      os.mkdir('temp')
    # enumerate replaces list.index(): index() was O(n) per chapter and
    # reported the wrong position when the catalogue contained duplicates.
    for chapterNo, catalogueUrl in enumerate(catalogueUrls, 1):
      print(f'爬取"{catalogueUrl}"中...{chapterNo}/{len(catalogueUrls)}')
      html = GetHtml(catalogueUrl, self.www, self.catalogueUrl)
      title = Find(html, self.config['正文提取正则表达式']['标题'])
      if not title:
        print(f'爬取"{self.catalogueUrl + catalogueUrl}"标题失败……')
        continue
      title = title[0]
      content = self.GetContent(catalogueUrl, self.config['正文提取正则表达式']['正文'], self.config['正文提取正则表达式'].get('下一页', []))
      if not content:
        print(f'爬取"{self.catalogueUrl + catalogueUrl}"正文失败……')
        continue
      content = '\n'.join(content)
      # Normalise common HTML artefacts before the config-driven rules.
      content = Replace(content, ['&nbsp;', ''], ['<br />', '\n'], ['<br>', '\n'])
      for key, value in self.config.get('替换索引', {}).items():
        content = re.sub(key, value, content, flags=re.M)
      content = Format(content, self.config['无效行关键词'])
      # Prepend the title unless it already appears in the body.
      if title not in content:
        content = f'{title}\n{content}'
      print(f'爬取到章节"{title}"，正文约{len(content)}字……')
      ATxt(f'{self.name}.txt', content + '\n')
      self.SaveChapter(title, content)

    print(f'爬取完毕！所有内容存放至"{self.name}.txt"！')

  def SaveChapter(self, title, content):
    """Write one chapter to 'chapter/<name>/<configPath>/<title>.txt' if enabled."""
    if not self.saveChapter:
      return
    outPath = 'chapter/{}/{}'.format(self.name, self.configPath)
    # exist_ok=True avoids the check-then-create race of the old exists() guard.
    os.makedirs(outPath, exist_ok=True)
    WTxt(f'{outPath}/{title}.txt', content)
