import scrapy
import mysql.connector
from scrapy_splash import SplashRequest

class Job51Spider(scrapy.Spider):
    """Scrape PHP job listings from 51job.com and persist them to MySQL.

    The search page is JavaScript-rendered, so requests go through Splash
    (``SplashRequest``). MySQL connection parameters are read from the
    project settings: MYSQL_HOST, MYSQL_PORT, MYSQL_USER, MYSQL_PASSWORD,
    MYSQL_DB, MYSQL_CHARSET.
    """

    name = "job51"
    allowed_domains = ["51job.com"]
    start_urls = ["https://we.51job.com/pc/search?jobArea=000000&keyword=php&searchType=2&keywordType="]

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original connected to MySQL here, but
        # ``self.settings`` is only available after Scrapy attaches the
        # crawler to the spider — reading it in __init__ raises
        # AttributeError. Defer the connection until it is first needed.
        super().__init__(*args, **kwargs)
        self.conn = None
        self.cursor = None

    def _ensure_connection(self):
        """Open the MySQL connection on first use.

        By the time requests are scheduled the crawler (and thus
        ``self.settings``) is bound, so reading settings here is safe.
        Idempotent: a second call is a no-op.
        """
        if self.conn is None:
            self.conn = mysql.connector.connect(
                host=self.settings.get('MYSQL_HOST'),
                port=self.settings.get('MYSQL_PORT'),
                user=self.settings.get('MYSQL_USER'),
                password=self.settings.get('MYSQL_PASSWORD'),
                database=self.settings.get('MYSQL_DB'),
                charset=self.settings.get('MYSQL_CHARSET'),
            )
            self.cursor = self.conn.cursor()

    def start_requests(self):
        """Yield a Splash-rendered request per start URL.

        ``wait: 0.5`` gives the page's JavaScript half a second to render
        before the HTML snapshot is taken.
        """
        self._ensure_connection()
        for url in self.start_urls:
            yield SplashRequest(url, self.parse, args={'wait': 0.5})

    def parse(self, response):
        """Extract job rows from the rendered page and insert them.

        Each entry of ``job_data`` must be a tuple matching the column
        list in ``insert_query``. The %s placeholders keep the query
        parameterized so the driver escapes the values (no SQL injection
        from scraped content).
        """
        job_data = []
        # TODO: implement the extraction logic and replace the placeholder
        # column list below with the real schema.
        insert_query = "INSERT INTO job (column1, column2, ...) VALUES (%s, %s, ...)"

        if job_data:
            # executemany batches all rows in a single driver call instead
            # of one round trip per row; commit only when rows were written.
            self.cursor.executemany(insert_query, job_data)
            self.conn.commit()

    def closed(self, reason):
        """Release DB resources when the spider finishes.

        Scrapy calls ``closed(reason)`` deterministically on shutdown,
        unlike ``__del__`` (which may never run, and raised AttributeError
        if __init__ had failed before the attributes existed). The None
        guards make this safe even if no connection was ever opened.
        """
        if self.cursor is not None:
            self.cursor.close()
        if self.conn is not None:
            self.conn.close()
