import urllib,gzip,re,requests,csv,time,os

"""
本文件将爬虫信息封装成类
每抓取一页睡一秒

"""

class Collect(object):
    """Scraper for the Maoyan top-100 movie board (https://maoyan.com/board/4).

    Usage: set the number of pages with totalpage(), fetch with
    getDataList() (sleeps 1s between pages to be polite), then persist
    with writeToCsv().
    """

    def __init__(self):
        # Base URL of the board; page N is served at offset N*10.
        self.url = 'https://maoyan.com/board/4?offset='
        # Number of pages to fetch; set via totalpage().
        self.page = 0

    def totalpage(self, page):
        """Set how many pages to scrape.

        :param page: page count; anything accepted by int() (e.g. a string
                     from input()). Raises ValueError on non-numeric input.
        """
        self.page = int(page)

    def getUrl(self, page):
        """Return the board URL for a zero-based page index (10 movies/page)."""
        return self.url + str(page * 10)

    def getOnePage(self, page):
        """Fetch and parse a single board page.

        :param page: zero-based page index.
        :return: list of records, one per movie:
                 [rank, image URL, title, stars, release time, score].
        """
        user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
        headers = {'User-Agent': user_agent}
        url = self.getUrl(page)
        # Timeout so a stalled connection cannot hang the scrape forever.
        res = requests.get(url, headers=headers, timeout=10)
        html = res.content.decode('utf-8')
        # Captures: rank, image URL, title, stars, release time,
        # score integer part, score fraction part.
        # Raw string so the \s escapes reach the regex engine verbatim.
        reg = r'<dd>[\s\S]*?<i class="board-index board-index-.*?">(.*)</i>[\s\S]*?<a[\s\S]*?class="image-link"[\s\S]*?>[\s\S]*?<img data-src="(.*?)"[\s\S]*?class="board-img" />[\s\S]*?<a[\s\S]*?data-act="boarditem-click"[\s\S]*?>(.*)</a></p>[\s\S]*?<p class="star">([\s\S]*?)</p>[\s\S]*?<p class="releasetime">([\s\S]*?)</p>[\s\S]*?<p class="score"><i class="integer">(.*)</i><i class="fraction">(.*)</i></p>[\s\S]*?</dd>'
        dataList = re.findall(reg, html)

        for key, val in enumerate(dataList):
            tmp = []
            # BUGFIX: was range(4), which dropped the release time (val[4])
            # and left rows one column short of the CSV header.
            for i in range(5):
                tmp.append(val[i].strip())
            # Score is captured as integer and fraction parts; rejoin them.
            tmp.append(val[5] + val[6])
            dataList[key] = tmp
        return dataList

    def getDataList(self):
        """Fetch all configured pages into self.dataList, sleeping 1s per page."""
        dataList = []
        if self.page >= 0:
            for page in range(self.page):
                data = self.getOnePage(page)
                dataList += data
                print(len(dataList))
                # Be polite to the server: one request per second.
                time.sleep(1)
        else:
            # Negative page count: abort the script.
            exit()
        self.dataList = dataList

    def writeToCsv(self, filename):
        """Append self.dataList to a CSV file, preceded by a header row.

        NOTE: append mode means the header is re-written on every call;
        re-running the script produces duplicate header rows.
        """
        # Explicit encoding avoids UnicodeEncodeError for the Chinese
        # header on systems whose locale encoding cannot represent it.
        with open(filename, 'a+', newline='', encoding='utf-8') as file:
            handler = csv.writer(file, dialect='excel')
            handler.writerow(['序号', '图片', '电影名称', '主演', '时间', '评分'])
            for row in self.dataList:
                handler.writerow(row)


if __name__ == '__main__':
    # Interactive entry point: ask how many pages to scrape,
    # fetch them, and dump the results to maoyan.csv.
    data = Collect()
    key = input('抓取多少页: ')
    data.totalpage(key)
    filename = 'maoyan.csv'
    data.getDataList()
    data.writeToCsv(filename)
    # BUGFIX: corrected typo "writen" -> "written" in the user-facing message.
    print(filename + " is written.")