import requests
from bs4 import BeautifulSoup
import re
from hashlib import md5
import os
from utils.django_setup import setup_django_model
from apis_1_0.models import Movie
setup_django_model()
import time


class MovieCrawler(object):
    """Crawl movie listing pages from ygdy8.net (dytt), parse each movie's
    detail page, download its poster image, and persist the extracted fields
    as ``Movie`` rows.

    TODO (original author note): switch to async crawling and a MySQL bulk
    insert structure.
    """

    # Highest listing-page index observed on the site.
    MAX_PAGE_INDEX = 204
    # Pages actually crawled: range(7, 32) == range(1, MAX_PAGE_INDEX)[6:31]
    # in the original code.
    PAGE_START = 7
    PAGE_STOP = 32
    # Destination directory for downloaded poster images.
    # NOTE(review): hard-coded absolute Windows path — consider making this
    # configurable.
    IMAGE_DIR = r"E:\pycharm_projects\douban_crawler\wx_fronntend\resources\images\movies"

    def __init__(self):
        # Map Movie field name -> Chinese verbose_name, skipping the pk field.
        self.model_fields = {field.name: field.verbose_name for field in Movie._meta.fields[1:]}
        self._dytt_movie_list_url = "https://www.ygdy8.net/html/gndy/dyzz/list_23_{}.html"
        # Every detail-page URL seen so far (kept for inspection/debugging).
        self.movie_info_url_list = []

    @staticmethod
    def _detect_encoding(response):
        """Return the charset declared inside the HTML body, falling back to
        requests' apparent encoding when none is declared.

        The original code indexed ``[0]`` unconditionally and raised
        ``IndexError`` on pages without a charset meta tag.
        """
        encodings = requests.utils.get_encodings_from_content(response.text)
        return encodings[0] if encodings else response.apparent_encoding

    def crawl_movie_list(self):
        """Crawl listing pages PAGE_START..PAGE_STOP-1 and process every
        movie link found on them.  A failed listing page is logged and
        skipped instead of aborting the whole crawl."""
        for index in range(self.PAGE_START, min(self.PAGE_STOP, self.MAX_PAGE_INDEX)):
            url = self._dytt_movie_list_url.format(index)
            print("[MovieCrawler] Crawling URL:{}".format(url))
            try:
                ret = requests.get(url, timeout=10)
            except requests.RequestException as e:
                print(f"[MovieCrawler] list page FAILED: {url} ({e})")
                continue
            ret.encoding = self._detect_encoding(ret)
            soup = BeautifulSoup(ret.text, 'lxml')
            for movie in soup.select("div.co_content8 ul table a"):
                movie_info_url = "https://www.ygdy8.net" + movie.attrs['href']
                print(f"movie_info_url:{movie_info_url}")
                self.movie_info_url_list.append(movie_info_url)
                self.crawl_movie_info(movie_info_url)
            # Politeness delay between listing pages.  The original sleep sat
            # after the whole loop, where it rate-limited nothing.
            time.sleep(10)

    def crawl_movie_info(self, url):
        """Fetch one movie detail page, download its poster, extract the
        structured fields and save a ``Movie`` row.

        Best-effort: network or parse failures are logged and skipped so a
        single bad page cannot abort the crawl.
        """
        try:
            ret = requests.get(url, timeout=10)
        except requests.RequestException:
            print(f"Crawl movie url: {url} FAILED....")
            return ""
        ret.encoding = self._detect_encoding(ret)
        soup = BeautifulSoup(ret.text, 'lxml')
        img_md5 = self._download_poster(soup)

        # Concatenate all paragraph text under the Zoom div into one string.
        text = " ".join(p_tag.text for p_tag in soup.select('div#Zoom p '))

        retrieved_info = self.retrieve_movie_information(text)
        if retrieved_info:
            movie_obj = Movie()
            movie_obj.download_url = url
            movie_obj.img_md5 = img_md5
            for field, content in retrieved_info.items():
                print(f"movie_obj.{field}='{content}'")
                # setattr instead of the original exec(): exec broke (and was
                # code-injectable) whenever the scraped content contained a
                # quote character.
                setattr(movie_obj, field, content)
            movie_obj.save()

    def _download_poster(self, soup):
        """Download the poster image under ``div#Zoom`` into IMAGE_DIR.

        Returns the md5 hexdigest of the image URL (used as the file stem),
        or ``""`` when the page has no image or the download fails — a
        missing poster must not abort the record.
        """
        try:
            img_url = soup.select("div#Zoom img")[0].get("src")
            img_content = requests.get(img_url, timeout=10).content
            img_md5 = md5(img_url.encode()).hexdigest()
            img_name = img_md5 + ".jpg"
            print(img_name)
            path = os.path.join(self.IMAGE_DIR, img_name)
            with open(path, "wb") as f:
                f.write(img_content)
            return img_md5
        except Exception:
            return ""

    def retrieve_movie_information(self, text):
        """Parse the ``◎<label> <value>`` segments of a detail page into a
        dict mapping Movie field names to their scraped string values.

        Returns ``None`` when the page layout does not match the expected
        pattern.  Raises ``ValueError`` if "下载地址" is absent (unchanged
        from the original).
        """
        text = text.replace("\u3000", " ")
        # Everything from the download-link header onwards is not field data.
        text = text[:text.index("下载地址") - 1]
        info_list = re.split("◎", text)[1:]
        model_fields = self.model_fields
        if len(info_list) not in range(19, 21):
            # Unexpected number of segments — log and carry on (as original).
            print("有问题！！")
        # Label is 4-5 chars (Chinese name padded with ideographic spaces,
        # now plain spaces) or the literal "IMDb评分"; value follows whitespace.
        field_re = re.compile(r"^(.{4,5}|IMDb评分)\s(.*)")
        try:
            pairs = [(m.group(1).replace(" ", ""), m.group(2).replace(" ", ""))
                     for m in (field_re.match(item) for item in info_list)]
        except Exception as e:
            # At least one segment did not match the label/value pattern.
            print(f"error: {e}")
            print([field_re.match(item) for item in info_list])
            return None
        # Labels with no corresponding Movie field are dropped.
        conflict_fields = [label for label, _ in pairs if label not in model_fields.values()]
        print(conflict_fields)
        # Pair each value with ITS OWN label.  The original zipped the
        # filtered field list against the unfiltered value list, silently
        # misaligning every field after a dropped label.
        verbose_to_name = {verbose: name for name, verbose in model_fields.items()}
        return {verbose_to_name[label]: value
                for label, value in pairs if label in verbose_to_name}



if __name__ == "__main__":
    # Script entry point: crawl the configured dytt listing pages and
    # persist every movie found.
    crawler = MovieCrawler()
    crawler.crawl_movie_list()

