#coding:utf-8
# Parse downloaded book-detail HTML pages and load their metadata into a SQLite .db3 file.
from bs4 import BeautifulSoup
import os
import pymysql as mysql
import string
import requests
import configparser
import re
import math
import time
import io
import sqlite3
import shutil



# Root directory of the downloaded book-detail HTML pages.
htmlroot = r'E:\down_data\wanli\20180423\html'# root of the downloaded HTML files
coverroot = r'E:\down_data\wanli\20180423\cover'# root of the cover images
# Pristine template database; copied below to a dated working file.
ref = r"E:\down_data\wanli\ref\zt_template.db3"
DB3FILE = os.path.join(r"E:\down_data\wanli\db", 'mirrorzjwuchineseallbook_' + time.strftime('%Y%m%d',time.localtime(time.time())) + '.db3')
# NOTE(review): this copy runs at import time, so merely importing this module
# creates (or overwrites) today's working database.
shutil.copyfile(ref, DB3FILE)
def write_sql(sqlList):
	"""Execute a batch of INSERT statements against the working database.

	Each statement that fails is reported (error + offending SQL) and
	skipped, so one bad record does not abort the whole batch.  The
	connection is always committed and closed, even if an unexpected
	error escapes the loop.

	:param sqlList: iterable of complete SQL statement strings
	"""
	conn = sqlite3.connect(DB3FILE)
	try:
		cur = conn.cursor()
		for sql in sqlList:
			try:
				cur.execute(sql)
			except sqlite3.Error as exc:
				# Print the reason as well as the statement instead of
				# silently swallowing every exception type.
				print(exc)
				print(sql)
		conn.commit()
	finally:
		conn.close()

def _normalize_date(raw):
	"""Normalize a raw '出版日期' field to a (year, yyyymmdd) string pair.

	Accepts 'YYYY/MM/DD', 'YYYY-MM-DD', 'YYYY年MM月DD日', bare 'YYYY' or
	'YYYYMM' (month/day are zero-padded); anything shorter than a full
	year falls back to ('1900', '19000000').
	"""
	year = '1900'
	compact = raw.split(" ")[0]
	compact = compact.replace("年", '').replace('月', '').replace('日', '')
	# Try both separators; at most one of them matches a given value.
	for sep in ("/", "-"):
		parts = compact.split(sep)
		if len(parts) == 3:
			year = parts[0]
			month = ("00" + parts[1])[-2:]
			day = ("00" + parts[2])[-2:]
			compact = year + month + day
	if len(compact) < 4:
		year = '1900'
		compact = '19000000'
	elif len(compact) == 4:          # year only
		year = compact[:4]
		compact = compact + '0000'
	elif len(compact) == 6:          # year + month
		year = compact[:4]
		compact = compact + '00'
	return year, compact


def _sql_literal(value):
	"""Quote *value* as a SQL string literal (single quotes doubled)."""
	return "'" + value.replace("'", "''") + "'"


def get_data(filepath, filename):
	"""Parse one downloaded book-detail page and build its INSERT statement.

	:param filepath: absolute path of the .html file to parse
	:param filename: bare file name; everything before '.html' is the raw id
	:returns: a complete INSERT statement for table modify_title_info_zt
	"""
	with open(filepath, 'r', encoding="utf-8") as f:
		html = f.read()
	soup = BeautifulSoup(html, 'lxml')
	detail = soup.find("div", class_="module1")
	title = soup.find("div", class_="title").text.replace("'", '"')
	# Breadcrumb trail, e.g. '全部图书资源>文学>小说' with the root stripped.
	breadcrumb = soup.find("div", class_="recommended_book")
	provider_subject = breadcrumb.text.replace("\n", "").replace("全部图书资源>", "")

	date = '1900'
	date_created = '19000000'
	creator = ""
	publisher = ""
	bookclass = ""
	issn = ""

	# Each metadata field sits in its own <span>, labelled by prefix.
	for span in detail.find_all('span'):
		text = ''.join(span.stripped_strings)
		if text.startswith("出版日期："):
			date, date_created = _normalize_date(text[len('出版日期：'):])
		elif text.startswith("作者："):
			creator = text[len('作者：'):]
			creator = re.sub(r'\s*[,，；]\s*', ';', creator)
			# Drop trailing role suffixes such as 主编 / 编著 / 著.
			creator = re.sub(r'\s+(?:主编|编著|著|改编|编绘)$', '', creator)
			creator = creator.replace("'", '"').replace(") ", ")").replace("  ", " ").replace(" ", ";").replace(",", ";")
		elif text.startswith("出版社："):
			publisher = text[len('出版社：'):].replace("'", '"')
		elif text.startswith("ISBN："):
			issn = text[len('ISBN：'):].replace("-", "").strip(r'?/?').strip(" ")
		elif text.startswith("中图分类："):
			# Keep only the code before the parenthesised description.
			bookclass = text[len('中图分类：'):].split("(")[0]
			bookclass = bookclass.replace("'", '"').replace("；", ";").replace(" ", "").strip()

	provider = "mirrorzjwuchineseallbook"
	country = "CN"
	doc_type = "1"          # was `type`; renamed so the builtin isn't shadowed
	medium = "2"
	language = "ZH"
	rawid = filename[:-5]   # strip the '.html' suffix
	provider_id = provider + "@" + rawid
	lngid = "MIRROR_ZJWU_CHINESEALL_TS_" + rawid
	provider_url = provider + "@http://10.60.154.30:8089/bookDetail.action?objectId=" + rawid

	cover = ""
	cover_path = os.path.join(coverroot, date_created[:6] + '/' + rawid + ".jpg")
	if os.path.exists(cover_path):
		cover = '/smartlib/mirrorzjwuchineseallbook/' + date_created[:6] + '/' + rawid + ".jpg"

	batch = time.strftime('%Y%m%d', time.localtime(time.time())) + '00'

	# Double any remaining single quotes so fields like provider_subject or
	# issn can no longer produce a malformed (and silently dropped) INSERT.
	values = (cover, lngid, batch, provider_url, provider, provider_id, rawid,
		title, date, doc_type, medium, language, country, creator,
		publisher, date_created, bookclass, issn, provider_subject)
	sql = ("insert  into modify_title_info_zt(cover,lngid,batch,provider_url,provider,"
		"provider_id,rawid,title,date,type,medium,language,country,creator,publisher,"
		"date_created,subject_clc,identifier_pisbn,provider_subject) values (%s)"
		% ",".join(_sql_literal(v) for v in values))

	return sql
	
if __name__=="__main__":
	sqlList = []
	cnt = 0
	for parent, dirnames, filenames in os.walk(htmlroot):
		for filename in filenames:
			if not filename.endswith('.html'):
				continue
			file_root = os.path.join(parent, filename)
			sql = get_data(file_root,filename)
			sqlList.append(sql)
			if cnt%500 ==0:
				write_sql(sqlList)
				sqlList = []
				print(cnt)
			cnt+=1
	write_sql(sqlList)