#!/usr/bin/env python
# encoding: utf-8

"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: xinganguo@gmail.com
@site: http://www.xingag.top
@software: PyCharm
@file: 4.dytt.py
@time: 2018/9/16 18:46
@description：爬取车质网（12365auto.com）投诉详情页【 requests + BeautifulSoup 】并写入 MySQL
"""

import requests
from bs4 import BeautifulSoup
from datetime import datetime
# NOTE(review): this rebinds the name `datetime` to the module, shadowing the
# class imported on the previous line; the code below consistently uses
# `datetime.datetime`, so the `from datetime import datetime` line is dead.
import datetime
import time

# NOTE(review): `Double` is imported but unused in this chunk of the file.
from sqlalchemy import create_engine, Column, Integer, String, Date, Double
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Database connection setup: MySQL via PyMySQL on localhost:3308, schema dbtest2
db_url = "mysql+pymysql://root:123456@localhost:3308/dbtest2"
engine = create_engine(db_url, echo=True)  # echo=True logs every emitted SQL statement
Session = sessionmaker(bind=engine)
session = Session()  # module-level session shared by all inserts below
Base = declarative_base()


# Browser-like User-Agent so the target site serves the normal page to the scraper
HEADERS = {
	'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36',
}


def testRequests(url):
	"""Scrape one complaint detail page from 12365auto.com (车质网).

	Parses the complaint title, the key/value summary list (div.jbqk) and the
	complaint body text (div.tsnr), and returns them as a flat dict of strings
	ready for DB insertion by insertData().

	:param url: absolute URL of a complaint detail page
	:return: dict with keys brand/bug/csName/dataSource/detail_url/grab_time/
	         introduct/model/series/status/tousu_date (all str)
	:raises requests.HTTPError: if the page request returns a 4xx/5xx status
	:raises ValueError: if the expected page structure is missing
	:raises KeyError: if a required summary field (品牌/车型/车系/时间) is absent
	"""
	grab_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
	response = requests.get(url, headers=HEADERS)
	response.raise_for_status()  # fail fast instead of parsing an error page
	# The site serves GBK; replace undecodable bytes instead of crashing
	html_content = response.content.decode('gbk', errors='replace')
	soup = BeautifulSoup(html_content, 'html.parser')

	# Complaint title ("bug"). Keep '' when the element is absent so the DB
	# column always receives a string (the original clobbered '' with None).
	title_tag = soup.find('h1', id='newstitle')
	if title_tag:
		bug = title_tag.text.strip()
		print(bug)
	else:
		bug = ''
		print("未找到指定元素")

	# Key/value summary list, e.g. 投诉品牌 / 投诉车型 / 投诉车系 / 投诉时间
	jbqk_div = soup.find('div', class_='jbqk')
	if jbqk_div is None:
		raise ValueError(f"page structure changed: no div.jbqk at {url}")
	complaint_info = {}
	for li in jbqk_div.find_all('li'):
		li_text = li.text.strip()
		# Entries are "key：value"; skip malformed items without the
		# full-width colon rather than raising ValueError on unpack.
		if '：' not in li_text:
			continue
		key, value = li_text.split('：', 1)
		complaint_info[key] = value.strip()
	for key, value in complaint_info.items():
		print(f"{key}:{value}")

	# Complaint body text
	tsnr_div = soup.find('div', class_='tsnr')
	if tsnr_div is None:
		raise ValueError(f"page structure changed: no div.tsnr at {url}")
	introduct = tsnr_div.find('p').text.strip()

	brand = complaint_info['投诉品牌']  # KeyError here means the summary list changed
	model = complaint_info['投诉车型']
	series = complaint_info['投诉车系']
	# Normalize the complaint timestamp to midnight of that day
	dt = datetime.datetime.strptime(complaint_info['投诉时间'], "%Y-%m-%d %H:%M")
	tousu_date = dt.replace(hour=0, minute=0, second=0, microsecond=0).strftime("%Y-%m-%d %H:%M:%S")

	return {
		'brand': brand,
		'bug': bug,
		'csName': brand,  # csName mirrors the brand in the original data model
		'dataSource': '车质网',
		# NOTE(review): detail_url is stored REVERSED (url[::-1]) — preserved
		# as-is from the original; confirm downstream consumers expect this.
		'detail_url': url[::-1],
		'grab_time': grab_time,
		'introduct': introduct,
		'model': model,
		'series': series,
		'status': url + "-" + series + "-" + brand,  # composite de-dup key
		'tousu_date': tousu_date,
	}


def insertData(url):
	"""Scrape one complaint page and persist it to the `tousu_all` table.

	Commits one row per call; rolls back (and re-raises) on a failed commit
	so the shared module-level session stays usable for subsequent URLs.

	:param url: complaint detail page URL, passed through to testRequests()
	"""
	# ORM mapping for table `tousu_all`. It is (re)declared on every call in
	# the original design; extend_existing=True lets the repeated declaration
	# replace the prior mapping instead of raising.
	class tousuAll(Base):
		__tablename__ = 'tousu_all'
		__table_args__ = {'extend_existing': True}
		id = Column(Integer, primary_key=True)
		brand = Column(String)
		bug = Column(String)
		csName = Column(String)
		dataSource = Column(String)
		detail_url = Column(String)
		grabtime = Column(Date)
		introduct = Column(String)
		model = Column(String)
		series = Column(String)
		status = Column(String)
		tousu_date = Column(Date)

	folder = testRequests(url)
	new_tousu = tousuAll(brand=folder['brand'],
						 bug=folder['bug'],
						 csName=folder['csName'],
						 dataSource=folder['dataSource'],
						 detail_url=folder['detail_url'],
						 grabtime=folder['grab_time'],
						 introduct=folder['introduct'],
						 model=folder['model'],
						 series=folder['series'],
						 status=folder['status'],
						 tousu_date=folder['tousu_date'])
	try:
		session.add(new_tousu)
		session.commit()
	except Exception:
		# A failed flush/commit leaves the session in a broken state; roll it
		# back so the next URL can still be inserted, then surface the error.
		session.rollback()
		raise
	# Throttle: sleep 4 seconds between inserts to be polite to the site
	# (original comment said 2 seconds but the code slept 4 — comment fixed).
	time.sleep(4)


if __name__ == '__main__':
	# Complaint detail pages to scrape; each one is fetched, parsed and
	# inserted into the database in turn by insertData().
	target_pages = (
		"https://www.12365auto.com/zlts/20240324/1146981.shtml",
		"https://www.12365auto.com/zlts/20240324/1146967.shtml",
		"https://www.12365auto.com/zlts/20240323/1146811.shtml",
		"https://www.12365auto.com/zlts/20240323/1146766.shtml",
		"https://www.12365auto.com/zlts/20240324/1147261.shtml",
		"https://www.12365auto.com/zlts/20240324/1147211.shtml",
		"https://www.12365auto.com/zlts/20240325/1147509.shtml",
		"https://www.12365auto.com/zlts/20240324/1147457.shtml",
	)
	for page_url in target_pages:
		insertData(page_url)
