from urllib import request
import requests
import re
import json
from lxml import etree
import csv
import pymongo
import pymysql

class ScrapeSpider:
	"""Toy spider for https://quotes.toscrape.com.

	Pipeline: fetch a page (urllib or requests), parse it into a list of
	{"content", "author", "tags"} dicts (regex or lxml), then persist the
	result (JSON / CSV / MySQL / MongoDB).
	"""

	def __init__(self):
		pass

	def request_by_urllib(self, url):
		"""Fetch *url* with the stdlib and return the decoded response body."""
		# Context manager closes the HTTP response (the original leaked it).
		with request.urlopen(url) as resp:
			return resp.read().decode()

	def request_by_requests(self, url):
		"""Fetch *url* with the requests library and return the response text."""
		res = requests.get(url)
		# Fail loudly on 4xx/5xx instead of silently parsing an error page.
		res.raise_for_status()
		return res.text

	def parse_by_json(self):
		"""TODO: parse a JSON/API payload. Not implemented yet."""
		pass

	def parse_by_re(self, content):
		"""Extract quotes from raw HTML *content* with regular expressions.

		Returns a list of {"content": str, "author": str, "tags": [str]}.
		"""
		items = re.findall(r'CreativeWork">(.*?)</div>', content, re.S)
		datas = []
		for item in items:
			# [1:-1] strips the curly quote marks wrapped around the text.
			# Local renamed from `content` so it no longer shadows the parameter.
			text = re.search(r'<span class="text" itemprop="text">(.*?)</span>', item, re.S).group(1)[1:-1]
			author = re.search(r'"author">(.*?)</small>', item, re.S).group(1)
			tags = re.findall(r'<a class="tag" href=".*?">(.*?)</a>', item, re.S)
			datas.append({
				"content": text,
				"author": author,
				"tags": tags
			})
		return datas

	def parse_by_lxml(self, html):
		"""Extract quotes from *html* via lxml XPath.

		Returns the same structure as parse_by_re().
		"""
		tree = etree.HTML(html)
		items = tree.xpath('//div[@class="quote"]')
		datas = []
		for item in items:
			# [1:-1] strips the curly quote marks wrapped around the text.
			content = item.xpath('./span[@class="text"]/text()')[0][1:-1]
			author = item.xpath('.//small[@class="author"]/text()')[0]
			tags = item.xpath('.//a[@class="tag"]/text()')
			datas.append({
				"content": content,
				"author": author,
				"tags": tags
			})
		return datas

	def save_to_csv(self, datas=None):
		"""Write *datas* to data.csv (tags joined with commas).

		*datas* defaults to None (no-op) to stay call-compatible with the
		old argument-less stub.
		"""
		if datas is None:
			return
		with open("data.csv", "w", newline="", encoding="utf-8") as f:
			writer = csv.DictWriter(f, fieldnames=["content", "author", "tags"])
			writer.writeheader()
			for data in datas:
				row = dict(data)
				row["tags"] = ",".join(row["tags"])
				writer.writerow(row)

	def save_to_json(self, datas):
		"""Write *datas* to data.json as readable UTF-8."""
		# Explicit encoding + ensure_ascii=False keep the curly quotes
		# human-readable and avoid UnicodeEncodeError on platforms whose
		# default encoding is not UTF-8.
		with open("data.json", "w", encoding="utf-8") as f:
			json.dump(datas, f, ensure_ascii=False, indent=2)

	def save_to_mysql(self, datas):
		"""Persist *datas* into MySQL: an `items` table plus a `tags` table
		with a foreign key back to items. Existing rows are wiped first.
		"""
		# NOTE(review): credentials are hard-coded; move to config/env in real use.
		con = pymysql.Connect(user="root", password="123456", charset="utf8mb4")
		try:
			with con.cursor() as cur:
				# utf8mb4 so any non-BMP character in a quote round-trips.
				cur.execute("create database if not exists scrape charset=utf8mb4")
				cur.execute("use scrape")
				cur.execute('create table if not exists items (id int not null primary key auto_increment, content varchar(500) not null, author varchar(50) not null)')
				cur.execute('create table if not exists tags (id int not null primary key auto_increment, title varchar(50) not null, items_id int not null, constraint fk_items_id foreign key (items_id) references items(id) on delete cascade on update cascade )')
				# Delete children first so the FK is never violated.
				cur.execute('delete from tags')
				cur.execute('delete from items')
				con.commit()
				for data in datas:
					# NULL lets auto_increment assign ids. The original
					# hand-counted item ids from 100 and inserted literal 0
					# for tag ids, which only works under MySQL's default
					# sql_mode (breaks with NO_AUTO_VALUE_ON_ZERO).
					cur.execute('insert into items values (%s,%s,%s)', args=(None, data['content'], data['author']))
					item_id = cur.lastrowid  # FK target for this quote's tags
					tags = [(None, tag, item_id) for tag in data['tags']]
					cur.executemany('insert into tags values (%s,%s,%s)', args=tags)
				con.commit()
		finally:
			# Always release the connection, even if an insert fails.
			con.close()

	def save_to_mongodb(self, datas):
		"""Insert *datas* into the scrape.items collection (dropped first)."""
		client = pymongo.MongoClient()
		try:
			db = client.get_database("scrape")
			db.drop_collection("items")
			collection = db.get_collection("items")
			# Insert copies: insert_many mutates its argument by injecting
			# an "_id" ObjectId into every dict, which would break a later
			# save_to_json() of the same list.
			collection.insert_many([dict(data) for data in datas])
		finally:
			client.close()


def main():
	"""Fetch one page of quotes, parse it, and persist the result."""
	spider = ScrapeSpider()

	# Fetch: either the stdlib urllib path or the requests path works.
	# content = spider.request_by_requests("https://quotes.toscrape.com/page/1/")
	content = spider.request_by_urllib("https://quotes.toscrape.com/page/1/")

	# Parse: the regex and lxml parsers produce the same structure.
	# datas = spider.parse_by_lxml(content)
	datas = spider.parse_by_re(content)

	# Persist: pick a storage backend.
	# spider.save_to_mongodb(datas)
	# spider.save_to_mysql(datas)
	spider.save_to_json(datas)


# Guard the entry point so importing this module no longer fires a
# network request as a side effect.
if __name__ == "__main__":
	main()