import urllib.error
import urllib.request

from bs4 import BeautifulSoup
import requests
#得到网页源代码
#参数：网址
#返回：网页源代码
def GetHtmlCode(url):
    """Fetch a page and return its raw HTML bytes.

    Args:
        url: the URL to download.

    Returns:
        The response body as bytes, or None when the connection fails
        or the server answers with a non-200 status code.
    """
    # user_agent = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Mobile Safari/537.36"
    # headers = {'User-Agent': user_agent}
    try:
        # `with` guarantees the HTTP response is closed on every path;
        # the original leaked the connection on both success and failure.
        with urllib.request.urlopen(url) as response:
            print(response.getcode())
            if response.getcode() != 200:
                print("error1: 打开网页失败，请检查您的网络！")
                return None
            return response.read()
    except urllib.error.URLError as e:
        print("error2: 网络连接超时", e)
        return None

#得到标题
#参数：网页源码
#返回：列表[标题]
def GetTitle(c_html, n):
    """Extract up to n titles from the page and append them to a log file.

    Args:
        c_html: raw HTML (bytes or str) of the listing page.
        n: maximum number of titles to collect.

    Returns:
        A list of the collected title strings.  (Bug fix: the original
        built a list but never appended to it, so it always returned [].)
    """
    soup = BeautifulSoup(c_html, 'html.parser', from_encoding='utf-8')
    titles = soup.find_all('a', class_="list-title", target="_blank")
    collected = []  # renamed: `list` shadowed the builtin
    # `with` + explicit encoding: the original left the file handle open
    # and depended on the platform default encoding.
    # NOTE(review): assumes the "text" directory already exists — confirm.
    with open("text/example.txt", "a", encoding="utf-8") as example_file:
        for title in titles:
            t = title.get_text()
            example_file.write(t + "\n")
            collected.append(t)
            print(t)
            # break only after writing, preserving the original's behavior
            # of emitting at least one title even when n <= 0
            if len(collected) >= n:
                break
    return collected

#得到电影内容
#参数：列表[标题,链接]
#输出：标题，内容，图片链接
def GetMovieContent(link):
    """Fetch one movie page and extract its title, HTML summary and poster.

    Args:
        link: sequence of [title, url] for the movie page.

    Returns:
        Tuple (title, summary_html, pic_url), or None when the page
        could not be downloaded.
    """
    c_html = GetHtmlCode(link[1])
    # Bug fix: GetHtmlCode returns None on any network/HTTP failure;
    # the original passed None straight into BeautifulSoup and crashed.
    if c_html is None:
        return None
    soup = BeautifulSoup(c_html, 'html.parser', from_encoding='utf-8')
    # NOTE(review): the fixed indices below ([0], [1], [4]) encode the
    # scraped site's table layout — they break if the page changes.
    content_all = soup.find('table').find_all('td', align="left", border="0")

    pic = content_all[0].find('img')['src']  # poster image URL
    summary_all = content_all[1].find_all('div')  # synopsis paragraphs
    summary = ""
    for s in summary_all:
        summary += "<p>" + s.get_text() + "</p>\n"

    # Viewing links: each entry holds the platform name and its URL.
    watchLinks = content_all[4].find_all('div', id="content_jr")
    for w in watchLinks:
        w = w.find_all('li')
        watchType = w[0].get_text()
        watchLink = w[1].find('a')['href']
        summary += "<p><b>" + watchType + "</b></p>"
        summary += "<p><a href=\"" + watchLink + "\">" + link[0] + "</a></p>"
    # print("title: ", link[0])
    # print("pic: ", pic)
    # print("summary: ", summary)
    return link[0], summary, pic

if __name__ == '__main__':
    # Candidate listing pages (currently unused; kept for the disabled
    # pipeline below).
    example_url = "http://top.baidu.com/buzz?b=42&c=513&fr=topbuzz_b1"
    example_url2 = "http://top.baidu.com/buzz?b=26&fr=topboards"
    # Bug fix: test_url was assigned twice — the first value was a dead
    # store; only the second was ever used.
    test_url = "http://www.baidu.com/link?url=jIjnKRNQeCtarNu8sqlzV1rBP2yVOfVDIcmy4vO5JoKYOwZjAzXx4yYMLzNV1cxT88K3Xxe4M14_R-ugkFx5J7zAwUQiINBAEhFg6YQPWy3"

    # Mobile Chrome UA so the site serves its mobile markup.
    # (The unused duplicate user_agent/headers locals were removed.)
    user_agent = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Mobile Safari/537.36"
    # t = requests.get(test_url)
    # print(t.url)
    s = requests.Session()
    s.headers['User-Agent'] = user_agent
    # Follow the Baidu redirect link and report the resolved URL.
    r = s.get(test_url)
    print(r.url)
    # # Disabled example pipeline:
    # content_html = GetHtmlCode(test_url)
    # if content_html is None:
    #     exit(0)
    # print(content_html)
    # example_list = GetTitle(content_html,30)