"""
1. 满足甲方需求
2. 自己选择
关系型数据库：mysql
    表与表存在关联关系 ：适合做后端管理系统
非关系型数据库：mongodb
    结构松散不固定：适合批量存储爬虫数据
"""
import requests
from lxml import etree
import pymongo
import pymysql


class MySpider:
    """Scrape quotes from quotes.toscrape.com and persist them to MySQL or MongoDB."""

    def __init__(self):
        pass

    def save_mysql(self, datas):
        """
        Persist scraped pages into a pre-created MySQL schema.

        Expected schema (run beforehand):
        create database toscrapy charset = utf8;
        use toscrapy
        create table item(id int primary key not null auto_increment, title varchar(300) not null, author varchar(50) not null);
        create table tag(id int not null primary key auto_increment, title varchar(30) not null, item_id int not null,   constraint fk_item_id foreign key(item_id)  references item(id) on update cascade on delete cascade );
        desc item;
        desc tag;

        :param datas: list of page dicts as returned by get_all_datas()
        """
        con = pymysql.connect(user='root', password='123456', db='fyx', host='localhost', port=3306, charset='utf8')
        try:
            cur = con.cursor()
            try:
                # Widen the columns first so long quotes/tags fit without errors.
                self.adjust_table_structure(cur)

                # Clear previous rows; tag goes first because of its FK to item.
                cur.execute("delete from tag")
                cur.execute("delete from item")

                item_id = 1001
                for data in datas:
                    for item in data['datas']:
                        # Truncate to stay within the (widened) column sizes.
                        safe_content = item['content'][:500]
                        safe_author = item['author'][:100]
                        cur.execute(
                            "insert into item values(%s, %s, %s)",
                            (item_id, safe_content, safe_author),
                        )

                        # One row per tag; the tag id auto-increments, so only
                        # (title, item_id) is supplied.
                        rows = [(tag[:100], item_id) for tag in item['tags']]
                        cur.executemany(
                            "insert into tag (title, item_id) values(%s, %s)", rows
                        )
                        item_id += 1

                con.commit()
            finally:
                # Close resources even if an execute fails (was leaked before).
                cur.close()
        finally:
            con.close()
        print("数据保存成功！")

    def adjust_table_structure(self, cursor):
        """Widen item/tag columns so longer scraped data fits; best-effort."""
        try:
            # Enlarge item columns (title up to 1000, author up to 200 chars).
            cursor.execute("ALTER TABLE item MODIFY title VARCHAR(1000)")
            cursor.execute("ALTER TABLE item MODIFY author VARCHAR(200)")
            print("已调整item表结构")
        except Exception as e:
            # Non-fatal: the insert may still succeed with the original sizes.
            print(f"调整item表结构失败: {e}")

        try:
            # Enlarge tag title column.
            cursor.execute("ALTER TABLE tag MODIFY title VARCHAR(200)")
            print("已调整tag表结构")
        except Exception as e:
            print(f"调整tag表结构失败: {e}")

    def save_mongodb(self, obj):
        """Insert the scraped page dicts into MongoDB (db/collection 'toScrapy2')."""
        conn = pymongo.MongoClient('localhost', 27017)
        try:
            collection = conn['toScrapy2']['toScrapy2']
            collection.insert_many(obj)
        finally:
            # Close the client even if the insert fails (was leaked before).
            conn.close()

    def get_all_datas(self):
        """Scrape pages 1-2 and return a list of per-page result dicts."""
        urls = [f'https://quotes.toscrape.com/page/{page}/' for page in range(1, 3)]
        return [self.get_page_data(url) for url in urls]

    def get_headers(self):
        """Return request headers with a browser User-Agent."""
        return {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36"
        }

    def get_page_data(self, url):
        """Fetch one listing page and extract author/content/tags per quote.

        :param url: page URL to scrape
        :return: dict with the source 'url' and a 'datas' list of quote dicts
        """
        # Fix: actually send the UA headers — get_headers() was defined but unused.
        res = requests.get(url, headers=self.get_headers())
        tree = etree.HTML(res.text)
        items = tree.xpath('//div[@class="quote"]')
        datas = []
        for item in items:
            author = item.xpath('.//small[@class="author"]/text()')[0]
            # [1:-1] strips the surrounding curly-quote characters.
            content = item.xpath('.//span[@class="text"]/text()')[0][1:-1]
            tags = item.xpath('.//a[@class="tag"]/text()')
            datas.append(
                {
                    'author': author,
                    'content': content,
                    'tags': tags,
                }
            )

        return {
            'url': url,
            'datas': datas,
        }


def main():
    """Scrape the quote pages and store them in MySQL."""
    spider = MySpider()
    datas = spider.get_all_datas()
    # Alternative storage backend:
    # spider.save_mongodb(datas)
    spider.save_mysql(datas)


# Guard the entry point so importing this module does not trigger
# network requests and database writes as a side effect.
if __name__ == "__main__":
    main()