import requests
from lxml import etree

# Site root; relative detail-page links scraped from list pages are joined onto this.
BASE_URL = 'https://www.dy2018.com'

# Desktop Chrome User-Agent so the site serves normal pages instead of
# rejecting the default python-requests UA.
headers = {
  "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
}




def get_detail_urls(url):
  """Fetch one list page and return absolute URLs of its movie detail pages.

  Args:
    url: URL of a dy2018 list page (e.g. .../dyzz/index_2.html).

  Returns:
    list[str]: absolute detail-page URLs found in the page's movie tables.
  """
  response = requests.get(url, headers=headers)
  # The site serves GBK-encoded pages; response.text may guess the wrong
  # codec (it trusts the HTTP header, not the <meta> charset), so decode the
  # raw bytes explicitly — same as parse_detail_url. Ignore the odd bad byte.
  text = response.content.decode('gbk', errors='ignore')
  html = etree.HTML(text)
  detail_urls = html.xpath("//table[@class='tbspan']//a/@href")
  # Return a real list, not a lazy one-shot map object, so callers can
  # iterate it more than once or take len() of it.
  return [BASE_URL + detail_url for detail_url in detail_urls]

def parse_detail_url(url):
  """Fetch a movie detail page and extract its metadata.

  Args:
    url: absolute URL of a dy2018 movie detail page.

  Returns:
    dict: always contains 'title' and 'cover'; contains 'name', 'year',
    'type', 'date', 'authors', 'download' when present on the page.
  """
  movie = {}
  response = requests.get(url, headers=headers)
  # Pages are GBK-encoded and occasionally contain stray bytes; don't let a
  # single bad byte raise UnicodeDecodeError and kill the whole crawl.
  text = response.content.decode('gbk', errors='ignore')
  html = etree.HTML(text)
  movie['title'] = html.xpath("//h1/text()")[0]
  zoomE = html.xpath("//div[@id='Zoom']")[0]
  movie['cover'] = zoomE.xpath(".//img/@src")[0]
  infos = zoomE.xpath(".//text()")
  # "◎<label>" prefixes that map directly onto a single dict key.
  simple_fields = {
    "◎译　　名": 'name',
    "◎年　　代": 'year',
    "◎类　　别": 'type',
    "◎上映日期": 'date',
  }
  for index, info in enumerate(infos):
    for prefix, key in simple_fields.items():
      if info.startswith(prefix):
        movie[key] = info.replace(prefix, "").strip()
        break
    else:
      if info.startswith("◎主　　演"):
        # The cast spans multiple text nodes: the first actor shares the
        # label's line, the rest follow until the next "◎" label starts.
        authors = [info.replace("◎主　　演", "").strip()]
        for x in range(index + 1, len(infos)):
          if infos[x].startswith("◎"):
            break
          authors.append(infos[x].strip())
        movie['authors'] = authors
  # Some pages lack a download list; omit the key instead of raising
  # IndexError on an empty xpath result.
  downloads = html.xpath(".//div[@id='downlist']//a/@href")
  if downloads:
    movie['download'] = downloads[0]
  return movie


def spider(start_page=2, end_page=2):
  """Scrape list pages start_page..end_page (inclusive) and collect movies.

  Page 1 lives at index.html; pages 2+ live at index_<n>.html — the
  conditional below picks the right form. Defaults preserve the original
  behavior of scraping only page 2.

  Args:
    start_page: first list page to scrape (1-based).
    end_page: last list page to scrape, inclusive.

  Returns:
    list[dict]: one metadata dict per movie, as built by parse_detail_url.
  """
  base_url = "https://www.dy2018.com/html/gndy/dyzz/index_{}.html"
  first_page_url = "https://www.dy2018.com/html/gndy/dyzz/index.html"
  movies = []
  for page in range(start_page, end_page + 1):
    url = base_url.format(page) if page > 1 else first_page_url
    for detail_url in get_detail_urls(url):
      movies.append(parse_detail_url(detail_url))
  print(movies)
  # Also return the data so callers can use it programmatically instead of
  # only reading stdout.
  return movies

if __name__ == "__main__":
  # Run the crawl only when executed as a script, not when imported.
  spider()