#!/usr/bin/env python
# encoding: utf-8

import datetime
import time
from datetime import datetime
from urllib.parse import quote_plus

from DrissionPage import WebPage
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

from models import TousuAll, CarmapTousu

# Database connection setup.
# NOTE(review): credentials are hard-coded in source — consider moving them
# to environment variables or a config file.
username = 'datauser'
# quote_plus escapes the '@' in the password so the connection URL stays valid
password = quote_plus('Datauser@2020')
host = '47.99.33.57'
port = '36011'
database = 'saicnqms_2020'
# database = 'saicnqms'

db_url = f"mysql+pymysql://{username}:{password}@{host}:{port}/{database}"
engine = create_engine(db_url, echo=True)  # echo=True logs every emitted SQL statement
Session = sessionmaker(bind=engine)
# Single module-level session shared by all functions in this script
session = Session()
Base = declarative_base()

# Browser-like User-Agent; defined here but not referenced in this file
# (presumably used by other modules or kept for future requests — verify).
HEADERS = {
	'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36',
}


def testRequests(url, page):
	"""Look up one complaint on 12365auto by its numeric id and scrape the result row.

	Args:
		url: Complaint detail-page URL; its basename (without extension) is
			the complaint number typed into the site's search box.
		page: A DrissionPage WebPage already opened on the search page.

	Returns:
		A dict of complaint fields ready for DB insertion, or 0 when the
		complaint no longer appears in the search results (i.e. withdrawn —
		this is the value insertData() checks for).
	"""
	time.sleep(5)
	today = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
	# detail_url is deliberately stored reversed; the rest of the pipeline
	# un-reverses it with [::-1] when displaying it.
	brand, bug, csName, dataSource, detail_url, grab_time, introduct, model, series, status, tousu_date = '', '', '', '车质网', url[::-1], today, '', '', '', '', ''
	# Complaint number = filename without extension, e.g. .../1236197.shtml -> 1236197
	number = url.split('/')[-1].rsplit('.', 1)[0]
	print(number)
	# Type the complaint number into the search box and submit the form
	page.ele('#wd').clear()
	time.sleep(2)
	page.ele('#wd').input(number)
	time.sleep(1)
	page.run_js('document.querySelector("[id=gjz_sub]").click()')
	# Wait for the result page to render
	time.sleep(5)
	# Results table element
	table = page.ele('.tslb_b')
	if not table:
		print("未找到表格元素")
		# No results table at all: treat the complaint as withdrawn.
		# (Previously execution fell through and crashed on an unbound `dt`.)
		return 0
	rows = table.eles('tag:tr')
	print(f"找到 {len(rows)} 行数据")
	dt = None
	# Walk the rows; searching by a unique complaint number yields a single
	# data row, so the fields end up holding that row's values.
	for row in rows:
		cells = row.eles('tag:td')
		if cells:
			print(cells[1].text)
			brand = cells[1].text
			csName = brand
			series = cells[2].text
			model = cells[3].text
			introduct = cells[4].text
			bug = cells[5].text
			status = url + "-" + series + "-" + brand
			# Parse the complaint-date column into a datetime
			dt = datetime.strptime(cells[6].text, "%Y-%m-%d")
	if dt is None:
		# Table existed but had only a header row — nothing to insert.
		print("未找到数据行")
		return 0
	# Normalize the date to midnight, then format as a full timestamp
	dt_midnight = dt.replace(hour=0, minute=0, second=0, microsecond=0)
	tousu_date = dt_midnight.strftime("%Y-%m-%d %H:%M:%S")
	FamilyInfo = selectFamilyInfo(csName, brand, series)
	tousu_data = {
		'brand': brand,
		'bug': bug,
		'csName': csName,
		'dataSource': dataSource,
		'detail_url': detail_url,
		'grab_time': grab_time,
		'introduct': introduct,
		'model': model,
		'series': series,
		'status': status,
		'factoryname_unic': FamilyInfo['factoryname_unic'],
		'brandname_unic': FamilyInfo['brandname_unic'],
		'familyname_unic': FamilyInfo['familyname_unic'],
		'tousu_date': tousu_date
	}
	return tousu_data


def insertData(url, page):
	"""Scrape one complaint and insert it into tousu_all unless it is already stored.

	Args:
		url: Complaint detail-page URL to process.
		page: Shared DrissionPage WebPage passed through to testRequests().
	"""
	record = testRequests(url, page)
	if record == 0:
		print(url + " 已经被撤诉了!!!")
	else:
		# The scraped dict uses 'grab_time' but the ORM column is 'grabtime';
		# rename that one key and pass everything else through unchanged.
		fields = dict(record)
		fields['grabtime'] = fields.pop('grab_time')
		new_tousu = TousuAll(**fields)
		# Before inserting, check whether this link already exists in the DB.
		# detail_url is stored reversed, so split(':')[0] yields the reversed
		# path/host prefix, which we use for a fuzzy LIKE match.
		prefix = new_tousu.detail_url.split(":")[0]
		print(prefix)
		matches = session.query(TousuAll).filter(
			TousuAll.detail_url.like(prefix + "%")).all()
		# Un-reverse the stored URL for human-readable log output
		readable_url = new_tousu.detail_url[::-1]
		if matches:
			print()
			print()
			print(readable_url + '    已经存在数据库中!!!')
			print()
			print()
		else:
			session.add(new_tousu)
			session.commit()
			print()
			print()
			print(readable_url + '    已经插入成功!!!')
			print()
			print()
	# Pause between records to avoid hammering the site
	time.sleep(4)

def getData():
	"""Open the complaint-search page and process each hard-coded detail URL."""
	# One shared browser page reused across all lookups
	browser = WebPage()
	browser.get('https://www.12365auto.com/zlts/')
	detail_urls = (
		"https://www.12365auto.com/zlts/20240829/1236197.shtml",
		"https://www.12365auto.com/zlts/20240829/1236397.shtml",
		"https://www.12365auto.com/zlts/20240829/1236687.shtml",
		"https://www.12365auto.com/zlts/20240829/1236741.shtml",
	)
	for detail_url in detail_urls:
		insertData(detail_url, browser)


def selectFamilyInfo(csName, brand, series):
	"""Map a scraped brand/series pair to the unified factory/brand/family names.

	Args:
		csName: Brand display name from the results table (same value as brand).
		brand: Brand name used to look up the carmap_tousu mapping row.
		series: Series name used in the same lookup.

	Returns:
		Dict with keys 'factoryname_unic', 'brandname_unic', 'familyname_unic'.

	Raises:
		LookupError: When no carmap_tousu row matches the brand/series pair
			(previously this crashed with an opaque AttributeError on None).
	"""
	# Brand -> manufacturer normalization table
	brands = {'名爵': '上汽乘用车',
			  '荣威': '上汽乘用车',
			  '斯柯达': '上汽大众',
			  '上汽大众': '上汽大众',
			  '上汽大通': '上汽大通',
			  '别克': '上汽通用',
			  '凯迪拉克': '上汽通用',
			  '雪佛兰': '上汽通用',
			  '宝骏': '上汽通用',
			  '新宝骏': '上汽通用',
			  '五菱汽车': '上汽通用',
			  '依维柯': '南京依维柯',
			  '飞凡': '上汽集团',
			  '上汽奥迪': '上汽集团'[:0] or '上汽大众',
			  '智己': '上汽集团'}
	# NOTE(review): csName is normalized here but never used afterwards — the
	# query below filters on brand/series only; confirm whether the lookup was
	# meant to use the normalized name.
	if "五菱" in csName:
		csName = "上汽通用"
	else:
		csName = brands.get(brand, brand)
	data = session.query(CarmapTousu).filter(CarmapTousu.brand == brand,
											 CarmapTousu.series == series).first()
	if data is None:
		# Fail loudly with context instead of AttributeError on NoneType
		raise LookupError(f"carmap_tousu 中未找到映射: brand={brand}, series={series}")
	FamilyInfo = {
		'factoryname_unic': data.factoryname_unic,
		'brandname_unic': data.brandname_unic,
		'familyname_unic': data.familyname_unic
	}
	return FamilyInfo


# Script entry point: run the scrape-and-insert pipeline.
if __name__ == '__main__':
	getData()
