# -*- coding: utf-8 -*-
'''
python manage.py runJob -m movie
'''
import hashlib
import json
import time

import requests
from bs4 import BeautifulSoup
from dateutil.parser import parse
from faker import Factory

from application import db
from common.libs.DateHelper import getCurrentTime
from common.models.movie_cat import MovieCat


class JobTask:
    """Scrape movie listings from dygod.net into the MovieCat table.

    Usage:
        python manage.py runJob -m movie -a 1 -p [start] end   # scrape listing pages
        python manage.py runJob -m movie -a 2 -p data.json     # import from a JSON file
    """

    HOST = "https://www.dygod.net"
    # Randomized User-Agent so requests look like a normal browser.
    headers = {'User-Agent': Factory.create().user_agent()}
    # Seconds to wait on any HTTP request before giving up (avoids hanging the job).
    REQUEST_TIMEOUT = 30

    def run(self, params):
        """Dispatch by action code.

        :param params: dict with 'act' ('1' = scrape listing pages from the
                       site, anything else = import from a JSON file) and
                       'param' (page range or filename).
        """
        if params['act'] == '1':
            if len(params['param']) == 2:
                # python manager.py runJob -m movie -a 1 -p 73 76
                self.runJob_1(start=int(params['param'][0]), end=int(params['param'][1]))
            else:
                # python manager.py runJob -m movie -a 1 -p 3
                self.runJob_1(end=int(params['param'][0]))
        else:
            # python manager.py runJob -m movie -a 2 -p data_new.json
            self.runJob_2(filename=params['param'][0])

    def runJob_1(self, end, start=1):
        """Scrape listing pages [start, end] and insert any new movies.

        Best effort: a failure on one movie is logged and skipped so the
        remaining movies on the page are still processed.
        """
        if end < start:
            end, start = start, end
        for page in range(start, end + 1):
            for item in self.getList(page):
                try:
                    data = self.get_movie_info(item)
                    print(data)
                    self._save_movie(data)
                except Exception as e:
                    # Log and keep going with the next movie.
                    print(e)
                    db.session.rollback()
            time.sleep(0.4)  # throttle: be polite to the remote server

    def runJob_2(self, filename):
        """Import movie records from a previously exported JSON file.

        :param filename: path to a JSON array of MovieCat-shaped dicts.
        """
        # Explicit UTF-8: the dumps contain Chinese text and must not depend
        # on the platform's default encoding.
        with open(filename, 'r', encoding='utf-8') as f:
            data = json.load(f)
        for item in data:
            try:
                self._save_movie(item)
            except Exception as e:
                # One bad row must not abort the whole import.
                print(e)
                db.session.rollback()

    @staticmethod
    def _save_movie(data):
        """Insert one movie dict unless its hash already exists (dedup key)."""
        if MovieCat.query.filter_by(hash=data['hash']).first():
            return
        db.session.add(MovieCat(**data))
        db.session.commit()

    def getList(self, num):
        """Fetch listing page *num* and return its per-movie <table> elements.

        :param num: 1-based page number; page 1 has no index suffix.
        :return: list of bs4 elements, one per movie row.
        """
        path = "/html/gndy/china/index.html" if num < 2 else "/html/gndy/china/index_{}.html".format(num)
        resp = requests.get(self.HOST + path, headers=self.headers, timeout=self.REQUEST_TIMEOUT)
        try:
            soup = BeautifulSoup(resp.content, "html.parser")
            return soup.select("div.co_content8 ul td table")
        finally:
            resp.close()

    @staticmethod
    def _extract_title(text):
        """Return the part between 《 and 》, or *text* unchanged if absent."""
        if '《' in text:
            return text.split('《')[1].split('》')[0]
        return text

    def _abs_img_url(self, src):
        """Absolutize a relative image src; rewrite the CDN host otherwise."""
        if 'https://' not in src:
            return self.HOST + src
        return src.replace("https://img.18qweasd.com/", "https://www.dygod.net/")

    def get_movie_info(self, item):
        """Scrape one movie's detail page and return a MovieCat-shaped dict.

        :param item: one <table> element from a listing page (see getList).
        :return: dict ready to be splatted into MovieCat(**data).
        """
        links = item.select("td")[2].select("a")
        data_child = {
            "classify": links[0].text[1:-1],
            "url": self.HOST + links[1]['href'],
            "name": self._extract_title(links[1]['title']),
        }

        time.sleep(0.5)  # throttle detail-page requests
        resp_child = requests.get(data_child["url"], headers=self.headers, timeout=self.REQUEST_TIMEOUT)
        try:
            soup_child = BeautifulSoup(resp_child.content, "html.parser")
        finally:
            resp_child.close()

        # Prefer the title on the detail page over the listing title.
        data_child['name'] = self._extract_title(soup_child.select(".title_all")[0].text)
        print(data_child["url"])

        imgs = soup_child.select("div.co_content8 td img")
        data_child['cover_pic'] = self._abs_img_url(imgs[0]['src'])  # cover image
        data_child["pics"] = self._abs_img_url(imgs[1]['src'])       # secondary still

        # Magnet link: keep only the part before the first '&' (drops trackers).
        data_child['magnet_url'] = soup_child.select("#downlist table td a")[0]['href'].split('&')[0]

        # The detail text uses ◎ as a field separator; pull out cast and synopsis.
        # \u3000 is the full-width (ideographic) space used by the site's labels.
        for section in soup_child.select("div.co_content8 td")[1].text.split("◎"):
            if "主\u3000\u3000演" in section:
                data_child['actor'] = '//'.join(section[5:].split("\u3000\u3000\u3000\u3000\u3000\u3000"))
            if "简\u3000\u3000介" in section:
                data_child['desc'] = section[6:]

        # Publication date as displayed on the source site.
        data_child['pub_date'] = parse(soup_child.body.div.div.span.text[5:]).strftime("%Y-%m-%d %H:%M:%S")
        data_child['source'] = self.HOST
        # md5 of the detail URL serves as the dedup key.
        data_child['hash'] = hashlib.md5(data_child['url'].encode("utf-8")).hexdigest()
        data_child['updated_time'] = data_child['created_time'] = getCurrentTime()
        return data_child
