import requests
from bs4 import BeautifulSoup
import re
import pymysql
from hashlib import md5
import os


class Movie(object):
    """Lightweight record for one movie scraped from a list page.

    Holds the parsed year, style, name and sharpness, plus the detail-page
    ``url`` which is filled in by the crawler after construction.
    """

    def __init__(self, year, style, name, sharpness):
        self.movie_name = name
        self.movie_year = year
        self.movie_style = style
        self.movie_sharpness = sharpness
        # Plain attribute: the original property pair added no validation or
        # computation, so the getter/setter boilerplate was pure overhead.
        # Callers still read and assign ``movie.url`` exactly as before.
        self.url = None


class DBHelper(object):
    def __init__(self):
        self.connection = pymysql.connect(host="172.17.0.2", user="root", passwd="123", database="movies", charset="utf8")
        self.cursor = self.connection.cursor()

    def insert(self, *params):
        sql = """
        INSERT INTO movies (name, year, style, site, brief_intro, tag, download_url) VALUES ("{}", {}, "{}", "{}", "{}", "{}", "{}");
        """.format(*params)
        print(sql)
        try:
            self.cursor.execute(sql)
            self.connection.commit()
        except Exception as e:
            print("try to execute SQL: {} failed, err info: {}".format(sql, e))

    def query(self, *items):
        sql = """
        SELECT {} from movies;
        """.format(*items)
        print(sql)
        try:
            self.cursor.execute(sql)
            res = self.cursor.fetchall()
        except Exception as e:
            print("query failed, sql: {}, err: {}".format(sql, e))
        return dict(res, key=items)


class MovieCrawler(object):
    """Scrapes ygdy8.net list pages for movies and each movie's detail page."""

    # Total number of list pages on the site.
    MAX_PAGE_INDEX = 204

    def __init__(self):
        self.movie_list = []

    def crawl_movie_list(self):
        """Crawl the list pages, parse each entry into a Movie, return the list.

        NOTE(review): the ``[:1]`` slice restricts the crawl to the first
        page — it looks like a debug leftover; drop it to crawl everything.
        """
        movie_list_url = "https://www.ygdy8.net/html/gndy/dyzz/list_23_{}.html"

        for index in range(1, self.MAX_PAGE_INDEX)[:1]:
            url = movie_list_url.format(index)
            print("Crawling URL:{}".format(url))
            try:
                ret = requests.get(url)
            except Exception as e:
                # One bad page shouldn't abort the whole crawl.
                print("Crawling URL:{} failed, err: {}".format(url, e))
                continue
            # The site declares its charset (gb2312) in the HTML body, not the
            # HTTP headers; honour it so Chinese text decodes correctly.
            encodings = requests.utils.get_encodings_from_content(ret.text)
            if encodings:
                ret.encoding = encodings[0]
            soup = BeautifulSoup(ret.text, 'lxml')
            for movie in soup.select("div.co_content8 ul table a"):
                url = "https://www.ygdy8.net" + movie.attrs['href']
                # Link texts look like "2020年剧情《片名》BD": capture year,
                # style, 《name》 and sharpness.
                match_obj = re.search(r"(\d+)年(.*)(《.*》)([A-Z]+)", movie.text)
                try:
                    # groups() raises AttributeError when the title didn't
                    # match (match_obj is None) — caught just below.
                    movie_year, movie_style, movie_name, movie_sharpness = match_obj.groups()
                    movie_obj = Movie(movie_year, movie_style, movie_name, movie_sharpness)
                    movie_obj.url = url
                    self.movie_list.append(movie_obj)
                except Exception as e:
                    print("Error info:", e, "===> movie name:", movie.text)
                    continue

        return self.movie_list

    def crawl_movie(self, movie_obj):
        """Fetch one movie's detail page.

        Returns a dict {"img_url": <md5 stem of poster URL, or "">,
        "text": <joined intro paragraphs>}, or "" when the page request fails.
        Side effect: downloads the poster image to a local resources folder.
        """
        url = movie_obj.url
        info = dict()
        try:
            ret = requests.get(url)
        except Exception as e:
            print("Crawl movie: {} failed, err: {}".format(movie_obj.movie_name, e))
            return ""
        encodings = requests.utils.get_encodings_from_content(ret.text)
        if encodings:
            ret.encoding = encodings[0]
        soup = BeautifulSoup(ret.text, 'lxml')
        # Default BEFORE the try: the original left img_md5 unbound when the
        # poster download failed, crashing with NameError at info assignment.
        img_md5 = ""
        try:
            img_url = soup.select("div#Zoom img")[0].get("src")
            # Download the poster locally, named by the md5 of its URL so
            # repeated crawls overwrite rather than duplicate.
            img_content = requests.get(img_url).content
            img_name = md5(img_url.encode()).hexdigest() + ".jpg"
            img_md5 = img_name.split(".")[0]
            print(img_name)
            # NOTE(review): hard-coded Windows path — move to configuration.
            path = os.path.join(r"E:\pycharm_projects\douban_crawler\wx_fronntend\resources\images\movies", img_name)
            with open(path, "wb") as f:
                f.write(img_content)
        except Exception:
            # Best-effort: a missing/unreachable poster leaves img_md5 == "".
            pass

        text = " ".join([p_tag.text for p_tag in soup.select('div#Zoom p ')])
        info["img_url"] = img_md5
        info["text"] = text
        return info


if __name__ == "__main__":
    # Crawl the movie list, fetch each detail page, and store the results.
    db_helper = DBHelper()
    movie_crawler = MovieCrawler()
    movie_list = movie_crawler.crawl_movie_list()
    for movie in movie_list:
        info = movie_crawler.crawl_movie(movie)
        # crawl_movie returns "" when the detail-page request fails; the old
        # code then crashed on info["text"] (TypeError) — skip those movies.
        if not info:
            continue
        db_helper.insert(movie.movie_name, int(movie.movie_year), movie.movie_style, "dytt", info["text"], "tag", info["img_url"])

