import json
import os
import random
import re
import subprocess
import time
from urllib.parse import urlsplit

import requests
from bs4 import BeautifulSoup

# Module-wide configuration: output filenames and HTTP request defaults.
DEFAULT={
    "json":"__douban_info.json",
    "poster":"__poster", # note: the file suffix is appended at download time
    "req":{
        "headers": {
            # Douban rejects requests without a browser-like Referer/User-Agent.
            'Referer': 'https://www.douban.com',
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
        },
        "timeout":10  # seconds, passed to requests.get
    }
}

def _get_suffix(url,default=""):
    _,ext=os.path.splitext(url)
    if ext=="":
        return default
    return ext

def _httpGet(url):
    """Issue a GET for *url* using the module-wide default headers and timeout."""
    cfg = DEFAULT["req"]
    return requests.get(url, headers=cfg["headers"], timeout=cfg["timeout"])


def _ffmpeg_image(src,dst):
    try:
        ffmpeg_cmd=["ffmpeg",'-v','error', "-i",src,dst]
        subprocess.run(ffmpeg_cmd)
        if subprocess.returncode==0:
            print("[I] ffmpeg transformat to '.jpg' OK")
        pass
    except:
        # maybe ffmpeg not installed
        pass

def get_web_image(dirname,data,index,post_hook=None):
    """Download the poster referenced by ``data["poster"]`` into *dirname*.

    Writes ``<dirname>/__poster<suffix>`` (suffix defaults to ``.jpg``); when
    *index* > 0 the index is appended to the filename. Sets
    ``data["have_poster"]`` to reflect success, converts non-jpg posters to
    ``__poster.jpg`` via ffmpeg, and always invokes *post_hook* (if given).

    Fix: a failed download (non-200 status or empty body) now returns early
    with ``have_poster=False`` instead of writing the error-page bytes to
    disk and reporting success.
    """
    url = data["poster"]
    suffix = _get_suffix(url, ".jpg")
    image = dirname + "/__poster" + suffix

    if index > 0:
        image = "%s.%d" % (image, index)

    try:
        res = _httpGet(url)
        if res.status_code != 200 or not res.content:
            print("[E] download poster image error I")
            data["have_poster"] = False
            return

        with open(image, "wb") as f:
            n = f.write(res.content)
        data["have_poster"] = True
        print("[I] get poster image success len = ", n)
        if n == 0:
            print("############# write length == 0 ...#############")
        elif suffix != ".jpg":
            # normalize unusual formats (webp, png, ...) to a jpg copy
            _ffmpeg_image(image, dirname + "/__poster.jpg")
    except Exception as e:
        print(e)
        data["have_poster"] = False
        print("[E] download poster image error II")
    finally:
        if post_hook:
            post_hook()
    

def _get_subjectID(s):
    # subject%2F1292052%2F
    # subject%2F       %2F
    #
    res=re.search("%2Fsubject%2F(\d+)%2F",s,re.I)
    if res :
        return res.groups()[0]

    return None

def _get_douban_id(kw):
    """Search Douban for *kw* and return the first hit's subject id, or None."""
    print("[Info] try to get subject ID by kw = ", kw)

    if not kw:
        return None

    # cat=1002 restricts the search to movies and TV series.
    url = f"https://www.douban.com/search?cat=1002&q={kw}"

    soup = BeautifulSoup(_httpGet(url).text, "html.parser")
    anchor = soup.select_one("div.result-list > div.result > div.pic > a.nbg")
    if anchor is None:
        return None
    href = anchor["href"]
    if not href:
        return None
    return _get_subjectID(href)

def _get_region(s):
    # subject%2F1292052%2F
    # subject%2F       %2F
    #
    res=re.search(r'<span class="pl">制片国家\/地区:<\/span>[\s\n]*([^<]+)',s,re.I)
    if res :
        return res.groups()[0]

    return None

def get_douban_info(name,subjectid=None):
    """Scrape movie/TV metadata for *name* from movie.douban.com.

    When *subjectid* is not given it is discovered by trying a list of search
    keywords derived from *name* (separators replaced by spaces, then the
    leading token before each separator).

    Returns a dict with keys such as doubanID, director, poster, remark,
    types, region, plot, actor, year, name — any of which may be missing when
    the page lacks that element — or None on failure.

    Fixes over the original:
    - ``posterUrl and A or B`` precedence bug: a missing poster URL crashed
      on ``.startswith`` instead of being skipped.
    - removed a dead duplicate-membership check right after appending.
    - guards against a missing ``#info`` block, missing ``.year`` element,
      missing ``src`` attribute, and a year with no digits (all previously
      raised AttributeError/KeyError).
    """
    data = {}

    if not subjectid:
        # Build candidate search keywords from the raw name.
        keywords = []
        kw = re.sub(r"[_\.\-]", " ", name)
        keywords.append(kw)
        name = kw
        for sep in (".", "-", "_", " "):
            kw = name.split(sep)[0]
            if kw not in keywords:
                keywords.append(kw)

        for kw in keywords:
            subjectid = _get_douban_id(kw)
            if subjectid:
                time.sleep(1)
                break
            # polite randomized back-off between search attempts
            time.sleep(2 + random.random() * 2)

        if not subjectid:
            print("[E] can't find subject ID")
            print("")
            return None

    print("[Info] subjectid = ", subjectid)

    data["doubanID"] = subjectid

    print("[Info] try to get subject page BY id")
    url = f"https://movie.douban.com/subject/{subjectid}/"

    response = _httpGet(url)

    if response.status_code != 200:
        print("[E] get subject page err")
        return None

    soup = BeautifulSoup(response.text, "html.parser")

    # Director
    res = soup.select_one("#info > span:nth-child(1) > span.attrs > a")
    if res:
        data["director"] = res.get_text()

    # Poster image URL
    res = soup.select_one("#mainpic > a > img")
    if res:
        posterUrl = res.get("src")
        if posterUrl and posterUrl.startswith(("https://", "http://")):
            data["poster"] = posterUrl

    # Rating
    res = soup.select_one("#interest_sectl > div.rating_wrap.clearbox > div.rating_self.clearfix > strong")
    if res:
        data["remark"] = res.get_text()

    # Genres
    data["types"] = []
    info = soup.select_one("#info")
    if info:
        for i in info.select('span[property^="v:genre"]'):
            data["types"].append(i.get_text())

    data["region"] = _get_region(response.text)

    # Synopsis
    res = soup.select_one('#link-report-intra span[property="v:summary"]')
    if res:
        plot = res.get_text().strip()
        plot = re.sub("   +", "", plot)
        plot = re.sub("\n\n", "\n", plot)
        data["plot"] = plot

    # Cast
    data["actor"] = [i["content"] for i in soup.select('meta[property^="video:actor"]')]

    # Year — tolerate a missing element or a year without digits.
    res = soup.select_one(".year")
    if res:
        m = re.search(r"(\d+)", res.get_text())
        if m:
            data["year"] = m.group(1)

    # Title
    res = soup.select_one("#content > h1 > span:nth-child(1)")
    if res:
        data["name"] = res.get_text()

    return data

def _check_info_obj(o):
    if not o:
        return False
    if o.get("doubanID") and o.get("name") and o.get("poster"):
        return True
    return False

def main(name,dirname):
    """Fetch Douban info for *name*, download its poster into *dirname*.

    Returns the info dict on success, None when lookup or validation fails.
    """
    info = get_douban_info(name)
    if not _check_info_obj(info):
        return None
    get_web_image(dirname, info, 0)
    return info

  