from bs4 import BeautifulSoup # 网页解析
import re # 正则表达式
import urllib.request, urllib.error # 指定URL，获取网页数据
import xlwt # 进行Excel操作
import sqlite3 # 进行SQLite数据库操作
import random

# Pre-compiled regexes that extract the interesting fields from each
# movie's raw <div class="item"> HTML (see getData below).
findLink = re.compile(r'<a class="" href="(.*?)">')
findPic = re.compile(r'<img.*src="(.*?)" width="100"/>')
findTitle = re.compile(r'<span class="title">(.*)</span>')
findSurvey = re.compile(r'<p class="">(.*?)</p>', re.S)
findRateNum = re.compile(r'<span class="rating_num".*>(.*?)</span>')
findCommentNum = re.compile(r'<span>(.*?)人评价</span>')
findInq = re.compile(r'<span class="inq">(.*?)</span>')

# Pool of HTTP proxy addresses; use_proxy() installs one picked at random
# to reduce the chance of the server blocking our IP.
proxy_addr = ["175.44.109.39:9999", "1.197.203.36:9999", "123.163.118.147:9999",
                "183.166.70.206:9999","119.120.22.233:9999","114.239.144.75:9999",
                "112.83.170.162:9999","115.218.2.52:9999","182.87.45.116:9999",]

# saveExcelPath = "movie.xls"  # target file for the (disabled) Excel export
saveSqlitePath = "movie.db"  # SQLite database file the results are written to

def main():
    """Scrape the Douban movie Top-250 (10 pages) and store it in SQLite."""
    # Route all subsequent HTTP requests through a random proxy.
    use_proxy()

    # 1. Fetch and parse the ten result pages (25 movies each).
    all_rows = []
    for page in range(10):
        page_url = "https://movie.douban.com/top250?start=" + str(page * 25)
        page_html = askUrl(page_url)
        all_rows.extend(getData(page_html))

    # 2. Optional Excel export (disabled):
    # saveDataInExcel(all_rows, "movie.xls")

    # 3. Persist everything into the SQLite database.
    saveDataInSqlite(all_rows, saveSqlitePath)


# Install a global urllib opener that routes HTTP traffic through a proxy,
# to reduce the chance of the server detecting and banning our IP.
def use_proxy():
    """Pick a random proxy from proxy_addr and install it as the default opener.

    Fix: uses random.choice(proxy_addr) instead of the original
    random.randint(0, 8) index, which silently went out of sync whenever
    the proxy list changed length (IndexError or unused entries).
    """
    addr = random.choice(proxy_addr)
    proxy = urllib.request.ProxyHandler({'http': addr})
    opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)
    # Affects every later urllib.request.urlopen() call in the process.
    urllib.request.install_opener(opener)


# Initialize the SQLite database schema.
def init_db(dbpath):
    """Create the `movie` table in the SQLite database at *dbpath*.

    Fixes over the original:
      * CREATE TABLE IF NOT EXISTS — the plain CREATE TABLE raised
        sqlite3.OperationalError on every run after the first;
      * the connection is closed even when execute() raises.

    Note: the column name `instroduction` is a historical typo kept
    on purpose — saveDataInSqlite inserts into it by name.
    """
    sql = '''
        create table if not exists movie
        (id integer primary key autoincrement,
        info_link text,
        pic_link text,
        cname varchar,
        ename varchar,
        score numeric,
        rated numeric,
        instroduction text,
        info text)
    '''
    con = sqlite3.connect(dbpath)
    try:
        con.execute(sql)
        con.commit()
    finally:
        con.close()


# Fetch one URL and return its body as text.
def askUrl(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Returns "" when the request fails or times out; errors are printed
    rather than raised, so callers must cope with an empty string.

    Fix: the original also called getData(html) here and threw the result
    away, parsing every page twice — that redundant call is removed.
    """
    headers = {
        # Present ourselves as a mobile Chrome browser so the server serves the page.
        "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Mobile Safari/537.36 Edg/87.0.664.60"
    }
    req = urllib.request.Request(url=url, headers=headers)
    html = ""
    try:
        response = urllib.request.urlopen(req, timeout=3)
        html = response.read().decode(encoding="utf-8")
    except urllib.error.URLError as e:
        # URLError covers HTTPError too; report whatever detail is present.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
        print("request timeout!")
    return html


# Parse the movie data out of one listing page's HTML.
def getData(html):
    """Parse a Top-250 listing page and return a list of movie rows.

    Each row is a list of 8 strings:
    [detail link, picture link, chinese title, other title,
     rating, rater count, staff/year summary, one-line quote]

    Fixes over the original: the dead `inq = []` pre-assignment is removed,
    and the redundant trailing .strip() after " ".join(...split()) is gone
    (the join of split() can never carry edge whitespace).
    """
    bs = BeautifulSoup(html, "html.parser")
    datalist = []
    for item in bs.findAll("div", class_="item"):
        movie = str(item)  # run the regexes over the item's raw HTML

        link = re.findall(findLink, movie)  # detail-page URL
        pic = re.findall(findPic, movie)    # poster image URL

        # Titles: usually [chinese, foreign]; the second carries a leading
        # "/" separator and non-breaking spaces that we strip out.
        title = re.findall(findTitle, movie)
        if len(title) == 2:
            title[1] = re.sub("/", "", title[1]).replace("\xa0", "")
        else:
            title.append(" ")  # pad so every row has a fixed 8 columns

        # Staff/year/genre summary: drop <br/> tags, collapse whitespace.
        survey = re.findall(findSurvey, movie)
        survey[0] = " ".join(survey[0].replace("<br/>", "").split())

        rateNum = re.findall(findRateNum, movie)        # numeric rating
        commentNum = re.findall(findCommentNum, movie)  # rater count

        # One-line recommendation quote; not every movie has one.
        inq = re.findall(findInq, movie)
        if len(inq) == 1:
            inq[0] = inq[0].replace("。", "")
        else:
            inq.append(" ")

        datalist.append([link[0], pic[0], title[0], title[1],
                         rateNum[0], commentNum[0], survey[0], inq[0]])

    return datalist
    
    
# Export the scraped movie rows to an .xls workbook.
def saveDataInExcel(data, path):
    """Write the movie rows to an Excel workbook at *path* (sheet "movie")."""
    captions = ["电影详情页", "图片链接", "影片中文名", "影片英文名","评分","评论数","概况","相关信息"]
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet("movie")

    # Header row at the top of the sheet.
    for col, caption in enumerate(captions):
        sheet.write(0, col, caption)

    # One spreadsheet row per movie, starting below the header.
    for row, record in enumerate(data, start=1):
        for col in range(8):
            sheet.write(row, col, record[col])

    book.save(path)


# Persist the scraped movie rows into the SQLite database.
def saveDataInSqlite(data, path):
    """Insert the movie rows into the `movie` table of the database at *path*.

    data: list of 8-element rows in the order
          [info_link, pic_link, cname, ename, score, rated, instroduction, info]
    path: filename of the SQLite database (created if missing).

    Fixes over the original:
      * parameterized INSERT (? placeholders) instead of string-built SQL,
        which broke on any value containing a double quote and was an
        SQL-injection hazard;
      * no longer mutates the caller's rows in place by wrapping them in quotes;
      * creates the table with IF NOT EXISTS, so repeated runs work.
    """
    conn = sqlite3.connect(path)
    try:
        conn.execute('''
            create table if not exists movie
            (id integer primary key autoincrement,
            info_link text,
            pic_link text,
            cname varchar,
            ename varchar,
            score numeric,
            rated numeric,
            instroduction text,
            info text)
        ''')
        conn.executemany(
            "insert into movie"
            " (info_link,pic_link,cname,ename,score,rated,instroduction,info)"
            " values (?,?,?,?,?,?,?,?)",
            data)
        conn.commit()
    finally:
        conn.close()
           

# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()