import re
import requests
import time
from lxml import etree

import MySQLUtils

"""
工具:
    js在线xpath代码  console.log(document.evaluate('your xpath路径',document).iterateNext())
教程文档:
    python菜鸟基础教程:https://www.runoob.com/python/python-tutorial.html
    request库官方文档:https://pypi.org/project/requests/
    xpath菜鸟教程:https://www.w3school.com.cn/xpath/index.asp
    etree官方文档:https://lxml.de/tutorial.html
网页分析：
    电影主页分析:
        第1页的网址：https://80s.tw/movie/list/-----p/1
        第2页的网址：https://80s.tw/movie/list/-----p/2
        第3页的网址：https://80s.tw/movie/list/-----p/3
        第4页的网址：https://80s.tw/movie/list/-----p/4
        第5页的网址：https://80s.tw/movie/list/-----p/5
    电影详情页分析（一个电影主页包含25个电影详情页，一个电影详情页包含1个电影详情):
        电影详情页的网址：
        https://80s.tw/movie/27681
        https://80s.tw/movie/*
        * 代表的是电影的序列号
爬取逻辑:
    先遍历电影主页，通过主页获取每个主页中包含的25个电影详情页连接，再访问电影详情页获取电影信息，并保存下来
本项目完成时间:
    截止2021.5.29为止,主页最大为552页 每页 25 个电影详情，共 552*25=13800部电影
"""
# URLs of the site being scraped.
base_url = "https://80s.tw"  # site root; prepended to the relative detail-page hrefs
http_url = "https://80s.tw/movie/list/-----p/"  # listing-page prefix; the page number is appended


def get_response(url):
    """Fetch *url* and return its decoded HTML text.

    :param url: absolute URL to request.
    :return: response body as a str, decoded with the encoding detected
        from the content (``apparent_encoding``) so Chinese pages decode
        correctly.
    :raises requests.RequestException: on connection errors or timeout.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/81.0.4044.138 Safari/537.36 '
    }
    # BUG FIX: the second positional argument of requests.get is `params`,
    # not headers — the original sent the User-Agent as a query string and
    # never as a header. A timeout keeps a hung server from stalling the crawl.
    response = requests.get(url, headers=headers, timeout=30)
    # The server does not always declare its charset; trust content sniffing.
    response.encoding = response.apparent_encoding
    return response.text


# Walk one listing page and scrape each of the (normally 25) movie entries it contains.
def get_info_from_response(url):
    """Parse the listing page at *url* and scrape every movie entry on it.

    Each entry is handed to ``get_info_from_each_movie``; a failing entry is
    retried once and, if it fails again, logged and skipped so the rest of
    the page is still processed.

    :param url: absolute URL of one listing page (``http_url`` + page number).
    """
    response_text = get_response(url)
    # NOTE: the xpath seen in browser devtools may differ from the xpath in
    # this raw response, because some DOM nodes are only added by JavaScript.
    tree = etree.HTML(response_text)
    base_home_xpath = '/html/body/div[5]/div[1]/div[3]/ul[2]'
    # Number of movie entries on this page (normally 25).
    li_len = len(tree.xpath(base_home_xpath + '/li'))
    # BUG FIX: xpath li positions are 1-based, so the valid positions are
    # 1..li_len inclusive; the original range(1, li_len) skipped the last movie.
    for li_position in range(1, li_len + 1):
        try:
            get_info_from_each_movie(tree, base_home_xpath, li_position)
        except Exception as e:
            msg = "异常信息：  %s" % e
            print(msg)
            # BUG FIX: derive mno from the *relative* href. The original
            # prefixed base_url first, so replace('/movie/', '') left the
            # domain inside mno and broke the unquoted mno in the log SQL.
            rel_href = tree.xpath(base_home_xpath + '/li[%s]/a[1]' % li_position)[0].attrib['href']
            desc_href = base_url + rel_href
            mno = rel_href.replace('/movie/', '')
            save_log(mno, msg)
            msg = "{}  Error: {}   位数: {}  重新执行中...".format(mno, url, str(li_position))
            print(msg)
            save_log(mno, msg)
            try:
                # Retry once; network hiccups are the common cause.
                get_info_from_each_movie(tree, base_home_xpath, li_position)
            except Exception:
                # BUG FIX: the original "skip" branch was unreachable (its
                # guard could never be false within one call) and a second
                # failure aborted the whole page; now we log and move on.
                msg = "ERROR_LOG:" + url + "   位数: " + str(li_position) + "跳过此电影...."
                print(msg)
                save_log(mno, msg)
                save_log(mno, "ShouldAgain: {}         {}".format(mno, desc_href))



def get_info_from_each_movie(tree, base_home_xpath, li_position):
    """Scrape one movie entry of a listing page plus its detail page,
    then persist the record via ``save_movie``.

    :param tree: parsed lxml tree of the listing page.
    :param base_home_xpath: xpath of the <ul> holding the movie <li> items.
    :param li_position: 1-based position of the <li> to scrape.
    """
    li_xpath = base_home_xpath + '/li[%s]' % li_position
    # Relative detail link, e.g. /movie/27692 — the trailing number is the movie id.
    desc_href = tree.xpath(li_xpath + '/a[1]')[0].attrib['href']
    mno = desc_href.replace('/movie/', '')
    # Movie title, e.g. 摸金玦之守护人.
    name = tree.xpath(li_xpath + '/h3/a/text()')[0].replace('\n', '').strip()
    if is_repeat(mno):
        # Already in the database — nothing to do.
        save_log(mno, '{}  名字：{}   已经爬取过，无需再次爬取'.format(mno, name))
        return
    # Be polite to the server before fetching the detail page.
    time.sleep(3)
    # Audio/source label, e.g. HD国语中字.
    source_type = tree.xpath(li_xpath + '/span/text()')[0].replace('\n', '').strip()
    # Thumbnail image URL (the src attribute is scheme-relative).
    img_path = 'http:' + tree.xpath(li_xpath + '/a/img')[0].attrib['src']
    # Douban score; some entries have none, in which case 0 is stored.
    score_nodes = tree.xpath(li_xpath + '/a/span[2]/text()')
    score = score_nodes[0] if score_nodes else 0

    # ------------------------ detail page ------------------------
    sub_tree = etree.HTML(get_response(base_url + desc_href))
    base_sub_xpath = '/html/body/div[5]/div/div[2]/div[1]/div/div[2]'
    # Release year: first run of digits found in the header text, if any.
    year_digits = re.findall(r'\d+', str(sub_tree.xpath(base_sub_xpath + '/text()')))
    date = year_digits[0] if year_digits else ''
    # Alternate titles.
    other_name = replaceSomeStr(str(sub_tree.xpath(base_sub_xpath + '/span[2]/text()')))
    # Cast and genre lists.
    actor_array = [a.text for a in sub_tree.xpath('//div[@class="info"]/span[3]/a')]
    type_array = [g.text for g in sub_tree.xpath(base_sub_xpath + '/div[1]/span[1]/a')]
    # Region (e.g. 大陆), language (e.g. 中文), director, release and update dates.
    place = sub_tree.xpath(base_sub_xpath + '/div[1]/span[2]/a')[0].text
    language = sub_tree.xpath(base_sub_xpath + '/div[1]/span[3]/a')[0].text
    director = sub_tree.xpath(base_sub_xpath + '/div[1]/span[4]/a')[0].text
    make_date = sub_tree.xpath(base_sub_xpath + '/div[1]/span[5]/text()')[0]
    modify_date = sub_tree.xpath(base_sub_xpath + '/div[1]/span[6]/text()')[0]
    # Synopsis.
    descrip = replaceSomeStr(str(sub_tree.xpath(base_sub_xpath + '/div[3]/text()')))
    save_movie(mno, desc_href, source_type, img_path, score, name, date, other_name, actor_array, type_array, place,
               language, director, make_date, modify_date, descrip)


def save_movie(mno, desc_href, source_type, img_path, score, name, date, other_name, actor_array, type_array, place, language, director, make_date, modify_date, descrip):
    """Insert one movie record into `80s`.`movie` and log the insert.

    BUG FIX (consistency): the original signature declared ``other_name, date``
    while the only call site passes ``..., name, date, other_name, ...`` and the
    format arguments were swapped a second time to compensate — two mistakes
    that cancelled out. The signature now matches the positional call site and
    the format order matches the SQL column order, so each column still gets
    exactly the same value as before.

    SECURITY NOTE(review): the SQL is built with str.format, which is open to
    SQL injection / breakage from quotes in scraped text; switch to
    parameterized queries if MySQLUtils supports them.
    """
    insert_sql = """
        INSERT INTO `80s`.`movie`(`mno`, `desc_href`, `source_type`, `img_path`, `score`, `name`, `other_name`, `date`, `actor_array`, `type_array`, `place`, `language`, `director`, `make_date`, `modify_date`, `desce`) 
        VALUES 
        ({}, '{}', '{}', '{}', {}, '{}', '{}', {}, "{}", "{}", '{}', '{}', '{}', '{}', '{}', '{}');
        """.format(mno, base_url + desc_href, source_type, img_path, score, name, other_name, date, str(actor_array),
                   str(type_array), place, language, director, make_date, modify_date, descrip)
    print("执行的SQL:  " + insert_sql)
    MySQLUtils.update(insert_sql)
    msg = "FirstInsert:{}, 名字：{} 电影已插入".format(mno, name)
    print(msg)
    save_log(mno, msg)

def save_log(mno, msg):
    """Write one row to movie_log.

    BUG FIX: *msg* frequently contains exception text with quotes or
    backslashes, which broke the hand-built INSERT; they are now escaped.
    SECURITY NOTE(review): still string-built SQL — prefer parameterized
    queries if MySQLUtils supports them.
    """
    safe_msg = str(msg).replace('\\', '\\\\').replace("'", "\\'")
    MySQLUtils.update("insert into movie_log (mno, msg) values ({},'{}')".format(mno, safe_msg))

def is_repeat(mno):
    """Return True when a movie row with this mno already exists in the DB."""
    rows = MySQLUtils.query("select 1 from movie where mno = " + mno).fetchall()
    return len(rows) != 0

# Clean up the str() representation of an xpath text() list: drops newline,
# tab, brackets, quotes, commas, backslashes, the literal letters 'n' and 't'
# (the residue of escaped \n/\t once their backslash is removed), the phrase
# '编辑整理', and surrounding whitespace.
def replaceSomeStr(s):
    """Return *s* with list-repr noise characters removed and stripped."""
    # One C-level pass instead of a chain of .replace() calls; the set below
    # is exactly the characters the original removed one by one.
    without_noise = s.translate(str.maketrans('', '', '\n\t[]",\\n\'t'))
    return without_noise.replace('编辑整理', '').strip()


def go(page, end_page):
    """Crawl listing pages *page* through *end_page* inclusive.

    BUG FIX: the original recursed unconditionally in a ``finally`` block, so
    ``end_page`` was never honored, the crawl never terminated, and hundreds
    of pages eventually exhausted the recursion limit; it also called ``print()``
    with no argument where the break message was meant to be printed. Rewritten
    as a bounded loop with the same per-page retry behavior.

    :param page: first listing-page number to crawl.
    :param end_page: last listing-page number to crawl (inclusive).
    """
    while page <= end_page:
        print("当前正在开始新的一页..... 休息3秒 page: " + str(page))
        save_log(-1, page)
        time.sleep(3)
        try:
            get_info_from_response(http_url + str(page))
        except Exception as e:
            msg = "异常信息：  %s" % e
            print(msg)
            save_log(page, msg)
            # Back off, then retry this page once before moving on.
            time.sleep(30)
            msg = "Break:休息30S....................................."
            print(msg)
            save_log(page, msg)
            try:
                get_info_from_response(http_url + str(page))
            except Exception as e2:
                # Log the second failure and continue with the next page
                # instead of aborting the whole crawl.
                save_log(page, "异常信息：  %s" % e2)
        page += 1


if __name__ == '__main__':
    # get_response(http_url)
    # Start crawling at listing page 3; 552 was the site's highest page
    # number as of 2021-05-29 (see the header note above).
    go(3, 552)





