import urllib.error
import urllib.request

from bs4 import BeautifulSoup

def GetHtmlCode(url):
    """Fetch *url* and return the raw response body as bytes.

    Returns None (after printing a diagnostic) when the connection
    fails/times out or when the server does not answer with HTTP 200.
    """
    try:
        response = urllib.request.urlopen(url)
    except urllib.error.URLError as e:  # HTTPError is a subclass, caught too
        print("error2: 网络连接超时", e)
        return None
    # Close the connection deterministically instead of leaking it until GC.
    with response:
        if response.getcode() != 200:
            print("error1: 打开网页失败，请检查您的网络！")
            return None
        return response.read()

def GetLinkTitle(c_html, front_url='http://www.mp4pa.com'):
    """Extract movie titles and absolute links from a listing page.

    Args:
        c_html: HTML source of the listing page (bytes, UTF-8).
        front_url: site root prepended to each relative href. Defaults to
            the site this script targets, so existing one-argument calls
            keep their behavior (previously this was read from a global
            that only existed when run as a script — NameError on import).

    Returns:
        A list of [title, absolute_link] pairs, in page order.
    """
    soup = BeautifulSoup(c_html, 'html.parser', from_encoding='utf-8')
    movies = soup.find_all('h4', class_="sea-text-hidden text-left")
    results = []  # renamed from `list`, which shadowed the builtin
    for movie in movies:
        anchor = movie.find('a')
        results.append([anchor.get_text(), front_url + anchor['href']])
    return results

def GetMovieContent(link):
    """Fetch one movie's detail page and assemble its HTML summary.

    Args:
        link: [title, url] pair as produced by GetLinkTitle.

    Returns:
        A (title, summary_html, picture_url) tuple. The summary contains
        the synopsis paragraphs, the AD blurb, every watch-method link,
        and finally the TEACH blurb (AD/TEACH are module-level globals —
        defined elsewhere; not visible in this file).
    """
    title, url = link[0], link[1]
    page = GetHtmlCode(url)
    soup = BeautifulSoup(page, 'html.parser', from_encoding='utf-8')
    cells = soup.find('table').find_all('td', align="left", border="0")

    pic = cells[0].find('img')['src']  # poster image URL

    # Synopsis: one <p> per <div> in the second cell, then the AD blurb.
    parts = [f"<p>{div.get_text()}</p>\n" for div in cells[1].find_all('div')]
    parts.append(AD)

    # Watch links live in the fifth cell: each block's first <li> names
    # the watch method, the second holds the actual link.
    for block in cells[4].find_all('div', id="content_jr"):
        items = block.find_all('li')
        watch_type = items[0].get_text()
        watch_href = items[1].find('a')['href']
        parts.append(f"<p><b>{watch_type}</b></p>")
        parts.append(f"<p><a href=\"{watch_href}\">{title}</a></p>")
    parts.append(TEACH)

    return title, "".join(parts), pic

if __name__ =='__main__':
    # root_url = 'http://baike.sogou.com/v158023505.htm'
    # Listing page to scrape; front_url is the site root used to turn
    # the page's relative hrefs into absolute links.
    root_url = 'http://www.mp4pa.com/dy/hd37.html'
    front_url = 'http://www.mp4pa.com'
    # Fetch the listing page's HTML source
    content_html = GetHtmlCode(root_url)
    if content_html is None:
        exit(0)
    # Parse the page into [title, link] pairs
    list = GetLinkTitle(content_html) # list.append(["title","http://"])
    # Extract the content behind each link, iterating newest-last so the
    # oldest entries are processed first

    for li in list[::-1]:
        title,summary,pic = GetMovieContent(li)
        # Check whether the title already exists in the database;
        # if not, write it to the database.
        # NOTE(review): `Up` is never imported or defined anywhere in this
        # file — as written this line raises NameError. Presumably a DB
        # helper module import is missing; confirm against the full repo.
        Up.UpdateSql(title,summary,pic)