# coding = UTF-8
# @Time : 2021/11/14 01:20
# @Author : PP_YY
# @File : spider_P.py
# @Version : 0.0.1
import os
import pathlib
import re  # regular expressions for scraping fields out of each item block
import sqlite3  # SQLite persistence
import sys
import urllib.error  # explicit import: HTTPError/URLError are caught in askurl
import urllib.parse
import urllib.request  # fetching pages

import xlwt  # Excel (.xls) output
from bs4 import BeautifulSoup  # HTML parsing

# Module-level regexes, applied to each stringified <div class="item"> block
# from the Douban Top-250 list pages.
findlink = re.compile(r'<a href="(.*?)">') # detail-page link of a movie
findImg = re.compile(r'<img.*src="(.*?)"',re.S)# poster image URL (re.S: dot spans newlines)
findTitle = re.compile(r'<span class="title">(.*?)</span>')# movie title(s); two matches when a foreign title exists
findscore = re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>')# average rating
findpeoplenum = re.compile(r'<span>(\d*)人评价</span>')# number of raters (NOTE(review): compiled but never used below)
findinq = re.compile(r'<span class="inq">(.*?)</span>')# one-line blurb (may be absent)
findinfo = re.compile(r'<p class="">(.*?)</p>',re.S)# director/cast/info paragraph (re.S spans newlines)


def getData(_baseurl):
    """Scrape and parse all 10 pages (25 movies each) of the Douban Top 250.

    Args:
        _baseurl: base list URL; the page offset (0, 25, ..., 225) is appended.

    Returns:
        A list of 7-element lists per movie:
        [detail link, poster URL, Chinese title, foreign title, score,
         one-line blurb, director/cast info].
    """
    datalist = []
    for page in range(10):
        html = askurl(_baseurl + str(page * 25))
        if not html:
            # fetch failed; skip this page instead of crashing on None
            continue
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):
            data = []
            item = str(item)  # stringify so the module-level regexes apply

            link = re.findall(findlink, item)[0]  # detail-page link
            data.append(link)

            img = re.findall(findImg, item)[0]  # poster image URL
            data.append(img)

            title = re.findall(findTitle, item)
            if len(title) == 2:  # the movie also has a foreign title
                data.append(title[0])
                data.append(title[1].replace("/", ''))
            else:
                data.append(title[0])
                data.append(" ")  # placeholder keeps the column count stable

            data.append(re.findall(findscore, item)[0])  # rating

            inq = re.findall(findinq, item)  # one-line blurb, may be missing
            data.append(inq[0] if inq else " ")

            info = re.findall(findinfo, item)  # director/cast paragraph
            # Strip <br/> tags.  Raw string fixes the invalid escape
            # sequences ('\s' in a plain string) the old code relied on.
            info = re.sub(r'<br(\s+)?/>(\s+)?', " ", str(info[0]))
            data.append(info.strip())

            datalist.append(data)
    return datalist

# Fetch the contents of one URL.
def askurl(url):
    """Fetch *url* and return the decoded HTML body as a str.

    Sends a desktop-browser User-Agent so Douban does not reject the
    request.  Returns None when the request fails (the error is printed);
    the old code left ``html`` unbound on failure and raised NameError.
    """
    head = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/79.0.3945.147 Safari/534.24 Device/elish Model/M2105K81AC XiaoMi/MiuiBrowser/14.6.50"}
    request_t = urllib.request.Request(url, headers=head)
    try:
        # context manager closes the response; decode so callers get a str
        # (BeautifulSoup accepts both str and file-like, so this is safe)
        with urllib.request.urlopen(request_t) as response:
            return response.read().decode("utf-8")
    except urllib.error.HTTPError as err:
        # HTTPError first: it subclasses URLError, so catching URLError
        # first (as before) made this branch unreachable
        print(err)
    except urllib.error.URLError as err:
        print(err)
    return None


def savedatatoDB(_datalist, _dbpath):
    """Persist the scraped movie rows into the movie250 table.

    (Re)creates the database via init_DB first; nothing is written when
    initialisation fails.

    Args:
        _datalist: list of 7-element rows as produced by getData().
        _dbpath: filesystem path of the SQLite database file.
    """
    if not init_DB(_dbpath):
        return
    # Parameterized INSERT: the old code built SQL by wrapping each field
    # in double quotes, which broke on any title/blurb containing a quote
    # and was injection-prone.  Placeholders also avoid mutating the
    # caller's rows.
    insert_sql = '''
        insert into movie250
        (
            info_link,
            pic_link,
            Cname,
            Fname,
            score,
            introduction,
            info
        )
        values (?, ?, ?, ?, ?, ?, ?)
    '''
    connect = sqlite3.connect(_dbpath)
    try:
        cursor = connect.cursor()
        cursor.executemany(insert_sql, _datalist)
        connect.commit()
        cursor.close()
        print("save to database successfully!")
    finally:
        connect.close()  # always release the connection, even on error


def init_DB(_dbpath):
    """Create a fresh SQLite database containing an empty movie250 table.

    Any existing file at *_dbpath* is deleted first, so the table always
    starts empty.

    Returns:
        True on success, False when creation fails (the error is printed).
    """
    db_file = pathlib.Path(_dbpath)
    if db_file.exists():
        db_file.unlink()  # pathlib equivalent of the old os.remove
    sql = '''
    create table movie250
    (
        id integer primary key autoincrement,
        info_link text,
        pic_link text,
        Cname varchar,
        Fname varchar,
        score numeric,
        introduction text,
        info text
    )
    '''
    try:
        connect = sqlite3.connect(_dbpath)
        try:
            connect.execute(sql)  # Connection.execute spares a cursor
            connect.commit()
        finally:
            connect.close()
        return True
    except sqlite3.Error as err:
        # narrow except: the old bare ``except:`` swallowed every bug
        # (including KeyboardInterrupt) and printed a useless "error"
        print("error:", err)
        return False
    


def saveData(_datalist ,_savepath):
    """Write the scraped rows to an .xls workbook at *_savepath*.

    Row 0 holds the column captions; each movie occupies one row below.
    """
    workbook = xlwt.Workbook(encoding="utf-8",style_compression=0)  # UTF-8 workbook, no style compression
    sheet = workbook.add_sheet("豆瓣top250",cell_overwrite_ok=True)  # allow overwriting cells
    headers = ["电影链接","海报链接","片名","外文名","评分","简评","相关信息"]
    for col, caption in enumerate(headers):
        sheet.write(0, col, caption)  # header row
    for row, record in enumerate(_datalist):
        print(f"正在保存第{row+1}条信息")
        for col, value in enumerate(record):
            sheet.write(row + 1, col, value)  # +1: skip the header row
    workbook.save(_savepath)

def main():
    """Entry point: scrape the Douban Top 250 and store it in SQLite."""
    base_url = "https://movie.douban.com/top250?start="
    # 1. crawl and parse every list page
    movie_rows = getData(base_url)
    # 2. persist the result (Excel export is available but disabled)
    excel_path = "/home/Codes/python/潘猪的爬虫小学习_P/豆瓣top250.xls"
    db_path = "/home/Codes/python/潘猪的爬虫小学习_P/豆瓣top250.db"
    savedatatoDB(movie_rows, db_path)
    # saveData(movie_rows, excel_path)


def show_database(_dbpath):
    """Print every row of the movie250 table in the given database file."""
    connect = sqlite3.connect(_dbpath)
    try:
        cursor = connect.cursor()
        select = '''
    select * from movie250

    '''
        for item in cursor.execute(select):
            print(item)
        cursor.close()
        # no commit: a SELECT modifies nothing (the old commit was a no-op)
    finally:
        connect.close()  # the old code leaked the connection



if __name__ == "__main__":
    main()
    # show_database("/home/Codes/python/潘猪的爬虫小学习_P/豆瓣top250.db")
    # init_DB("/home/Codes/python/潘猪的爬虫小学习_P/豆瓣top250.db")
