#coding:utf-8
from bs4 import BeautifulSoup
import os, sys
import string
import re
import time
import sqlite3
import shutil
import configparser

# Load the data root from config.ini (must sit next to this script, since the
# path is relative to the working directory).
config = configparser.ConfigParser()
with open("config.ini", "r") as cfgfile:
	# read_file() replaces readfp(), which was deprecated and removed in
	# Python 3.12.
	config.read_file(cfgfile)
	ROOT = config.get("fileroot", "root")

cur_dir_fullpath = os.path.dirname(os.path.abspath(__file__))
StartTime = time.time()

DATAROOT = os.path.join(ROOT, "20180423")
# Raw strings so the backslashes in these Windows paths are never interpreted
# as escape sequences (plain '\d', '\o' only work by accident and warn on
# modern Python).
DB3TEMPLATE = r'E:\down_data\博图\ref\template.db3'

DB3FILE = os.path.join(r'E:\down_data\博图\output', 'cqubotubook_' + time.strftime('%Y%m%d', time.localtime()) + '.db3')
		
def insertData(sqlList):
	"""Execute each statement in sqlList against DB3FILE in one transaction.

	On the first failing statement, print it and abort the whole program
	(the batch is not committed).  The connection is always closed, even on
	the abort path -- the original leaked it when sys.exit() fired.
	"""
	conn = sqlite3.connect(DB3FILE)
	try:
		cur = conn.cursor()
		for sql in sqlList:
			try:
				cur.execute(sql)
			except sqlite3.Error:
				# Narrow except: only database errors should trigger the
				# abort; a bare except would also swallow KeyboardInterrupt.
				print(sql)
				sys.exit(-1)
		conn.commit()
	finally:
		conn.close()
	
def get_data(parent, filename):
	"""Parse one book's HTML page and build its INSERT statement.

	parent/filename locate the .html file under DATAROOT.  Returns the SQL
	string, or None when the page lacks the expected structure (no token
	div, no bookname element, or an empty title).
	"""
	file_root = os.path.join(parent, filename)
	# NOTE(review): splitting on '\\' assumes Windows paths; lst[-2] is the
	# parent directory name used as the category segment of the cover URL.
	lst = file_root.split('\\')
	rawid = filename[:-5]  # strip the trailing '.html'
	cover = ''
	coverpath = os.path.join(DATAROOT, 'cover', lst[-2], filename[:-5].lower() + '.jpg')
	if os.path.exists(coverpath):
		cover = '/smartlib/cqubotubook/' + lst[-2] + '/' + filename[:-5].lower() + '.jpg'
	lngid = 'CQU_BOTU_TS_' + rawid
	batch = time.strftime('%Y%m%d', time.localtime()) + '00'
	language = 'EN'
	country = 'US'
	provider = 'cqubotubook'
	doc_type = '1'  # renamed from 'type' to stop shadowing the builtin
	medium = '2'
	date = "1900"
	date_created = "19000000"
	provider_url = 'cqubotubook@http://222.198.130.68/BookRead.aspx?bookid=' + rawid
	provider_id = 'cqubotubook@' + rawid
	# Context manager closes the handle; the original leaked it.
	with open(file_root, 'r', encoding="utf-8") as f:
		htmlText = f.read()
	soup = BeautifulSoup(htmlText, 'lxml')
	title = ""
	identifier_pisbn = ""
	publisher = ""
	creator = ""
	provider_subject = ""
	try:
		div = soup.find("div", id="token")
		# div is None when the page has no token div -> AttributeError.
		tas = div.find_all('a')
	except AttributeError:
		print(file_root)
		return
	# First <a> is a label, the rest are subject keywords.
	for a in tas[1:]:
		provider_subject = provider_subject + a.text + ";"
	provider_subject = provider_subject[:-1]
	# Single quotes would break the hand-built SQL literal below.
	provider_subject = provider_subject.replace("'", '"')
	b = soup.find('b', id="bookname")
	if not b:
		return
	title = b.text
	title = title.replace("'", '"').replace('\n', "").replace('\r', '').replace("\t", "").replace("\0", "")
	if title == "":
		return
	p = soup.find('p', id="author")
	if not p:
		creator = ''
	else:
		author = p.find('a')
		creator = author.text
		creator = creator.replace("'", '"')
		creator = creator.replace(",", ";")
	isbn = soup.find('p', id='isbn')
	if not isbn:
		identifier_pisbn = ""
	else:
		# Text looks like 'ISBN: xxx'; keep what follows the first colon.
		identifier_pisbn = isbn.text.split(':')[1]
		identifier_pisbn = identifier_pisbn.replace("'", '"')
	press = soup.find('p', id='press')
	if not press:
		publisher = ""
	else:
		publisher = press.text.split(':')[1]
		publisher = publisher.replace("'", '"')

	# SECURITY NOTE: the SQL is built by string interpolation; the quote
	# replacement above is the only escaping.  Acceptable only because the
	# input is locally crawled HTML -- parameterized queries would be safer.
	sql = "insert into modify_title_info_zt(publisher,lngid,rawid,language,country ,identifier_pisbn,provider,type ,medium ,batch, title,provider_url, provider_id ,creator,cover,provider_subject,date,date_created) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" % (publisher, lngid, rawid, language, country, identifier_pisbn, provider, doc_type, medium, batch, title, provider_url, provider_id, creator, cover, provider_subject, date, date_created)
	return sql
	
if __name__ == "__main__":
	# Start from a copy of the empty template DB (both arguments must be
	# plain file paths, not directories).
	shutil.copyfile(DB3TEMPLATE, DB3FILE)

	html_root = os.path.join(DATAROOT, 'html')
	sqlList = []
	cnt = 0
	for parent, dirnames, filenames in os.walk(html_root):
		for filename in filenames:
			if not filename.endswith('.html'):
				continue
			cnt += 1
			sql = get_data(parent, filename)
			if sql is None:  # malformed page: nothing to insert
				continue
			sqlList.append(sql)
			# Flush in batches of 100 to bound the pending-statement list.
			if cnt % 100 == 0:
				print(cnt)
				insertData(sqlList)
				sqlList = []
	# Flush whatever remains once, after the walk finishes (the original
	# flushed per-directory, issuing empty batches for html-free dirs).
	insertData(sqlList)
	print('time total:%.2fs' % (time.time() - StartTime))
		
		
		
		
