# -*- coding:utf-8 -*-
 
# date: 2020-05-10
# author: jingluo
import sys
from bs4 import BeautifulSoup
import sqlite3
import re
import urllib.request, urllib.error
import xlwt

# Extraction patterns applied to the HTML of one <div class="item"> movie entry.
findLink = re.compile(r'<a href="(.*?)">')  # detail-page URL
findImageSrc = re.compile(r'<img.*src="(.*?)"', re.S) # poster image src; re.S lets "." match newlines
findTitle = re.compile(r'<span class="title">(.*)</span>')  # title span(s): Chinese and optional foreign title
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')  # average score
findJudge = re.compile(r'<span>(\d*)人评价</span>')  # number of raters (digits before "人评价")
findInq = re.compile(r'<span class="inq">(.*)</span>')  # one-line quote (may be absent)
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)  # misc info paragraph (director/cast/year/...)

def main():
	"""Entry point: scrape the Douban Top250 list and persist it to SQLite."""
	# Douban paginates via the "start" query parameter appended below.
	base_url = "https://movie.douban.com/top250?start="
	# Scrape all pages, then store every row into the movie.db database.
	saveData2DB(getData(base_url), "movie.db")

# Crawl all 10 result pages and parse each movie entry into a list of fields.
def getData(base_url):
	"""Scrape the Douban Top250 listing.

	Args:
		base_url: URL prefix; the 0-based item offset is appended per page.

	Returns:
		A list of per-movie lists, each with 8 string fields:
		[link, img_src, chinese_title, other_title, rating,
		 vote_count, quote, misc_info].
	"""
	data_list = []
	for page in range(10):
		# Each page shows 25 entries; "start" is the item offset.
		html = askURl(base_url + str(page * 25))

		# Parse the page and handle each movie entry independently.
		soup = BeautifulSoup(html, "html.parser")
		for item in soup.find_all("div", class_="item"):
			item = str(item)
			data = []

			data.append(re.findall(findLink, item)[0])
			data.append(re.findall(findImageSrc, item)[0])

			titles = re.findall(findTitle, item)
			if len(titles) == 2:
				data.append(titles[0])                   # Chinese title
				data.append(titles[1].replace("/", ""))  # foreign title, "/" separator stripped
			else:
				data.append(titles[0])
				data.append('')                          # no foreign title; keep columns aligned

			data.append(re.findall(findRating, item)[0])
			data.append(re.findall(findJudge, item)[0])

			# Not every movie has a one-line quote.
			inq = re.findall(findInq, item)
			data.append(inq[0].replace("。", "") if inq else '')

			bd = re.findall(findBd, item)[0]
			# Raw strings here: '\s' in a plain string literal is an invalid
			# escape sequence (SyntaxWarning since Python 3.12).
			bd = re.sub(r'<br(\s+)?/>(\s+)?', " ", bd)
			bd = re.sub(r'/', " ", bd)
			data.append(bd.strip())

			data_list.append(data)
	return data_list

# Fetch the page body of one URL as UTF-8 text.
def askURl(url):
	"""Request *url* and return its HTML as a string.

	A desktop-browser User-Agent is sent with the request. On failure
	the HTTP code/reason is printed (not raised) and an empty string
	is returned.
	"""
	req = urllib.request.Request(
		url,
		headers={"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"},
	)
	try:
		with urllib.request.urlopen(req) as response:
			return response.read().decode("utf-8")
	except urllib.error.URLError as e:
		if hasattr(e, "code"):
			print("请求出错",e.code)
		if hasattr(e, "reason"):
			print("错误原因",e.reason)
	return ""

# Persist the scraped rows into the movie250 table of a SQLite database.
def saveData2DB(data_list, dbpath):
	"""Insert every row of *data_list* into the movie250 table.

	Args:
		data_list: list of 8-field rows as produced by getData().
		dbpath: path to the SQLite database file; schema is created first.
	"""
	init_db(dbpath)
	conn = sqlite3.connect(dbpath)
	try:
		# Parameterized query instead of building SQL by string
		# concatenation: values containing quotes no longer break the
		# statement (or inject SQL), and the caller's rows are not
		# mutated in place as a side effect.
		sql = '''
		insert into movie250
		(
		info_link,pic_link,cname,ename,score,rated,instroduction,info
		)
		values(?,?,?,?,?,?,?,?)'''
		cur = conn.cursor()
		cur.executemany(sql, data_list)
		# One commit for the whole batch instead of one per row.
		conn.commit()
		cur.close()
	finally:
		conn.close()

# Create the movie250 table (no-op if it already exists).
def init_db(dbpath):
	"""Create the movie250 schema in *dbpath* if it is not there yet.

	IF NOT EXISTS makes initialization idempotent: the original
	CREATE TABLE raised sqlite3.OperationalError on any second run
	against an existing database file.
	"""
	sql = '''
		create table if not exists movie250
		(
		id integer primary key autoincrement,
		info_link text,
		pic_link text,
		cname varchar,
		ename varchar,
		score numeric,
		rated numeric,
		instroduction text,
		info text
		);
	'''
	conn = sqlite3.connect(dbpath)
	try:
		conn.execute(sql)
		conn.commit()
	finally:
		conn.close()