from MySpider import settings
import pymysql


class MyDB:
    """Thin pymysql wrapper around the Scrapy project's `scrapydata` database.

    Responsibilities: look up per-site spider configuration, check for
    duplicate unique ids, persist xpath results, and perform generic row
    inserts.  All *values* are bound as DB-API parameters; table/column
    *identifiers* cannot be bound by the driver and are interpolated, so
    they must come from trusted (in-project) sources only.
    """

    def __init__(self):
        # NOTE(review): credentials are hard-coded here; they should be read
        # from MySpider.settings (already imported at module level) or the
        # environment instead of living in source control.
        self.connect = pymysql.connect(
            host='localhost',
            # Oracle-cloud CentOS-1 server
            # host='64.110.90.248',
            port=3306,
            user='root',
            password='DEbu2ELoLAmes83O3EL1T6LOxas6sO!',
            db='scrapydata',
            charset='utf8'
        )
        self.cursor = self.connect.cursor()

    def close(self):
        """Release the cursor and the underlying connection."""
        try:
            self.cursor.close()
        finally:
            self.connect.close()

    def get_url_ids(self, sql):
        """Execute *sql* and return the first column of every result row.

        :param sql: complete SELECT statement (caller-built, trusted)
        :return: list of first-column values
        """
        self.cursor.execute(sql)
        return [row[0] for row in self.cursor.fetchall()]

    def is_unique_id_duplicated(self, table_name, unique_id_name, unique_id_value):
        """Return True if *table_name* already holds a row whose
        *unique_id_name* column equals *unique_id_value*.

        Identifiers cannot be bound by the DB-API, so table/column names are
        interpolated; the value itself is now passed as a bound parameter
        (previously it was string-formatted into the SQL, an injection risk).
        """
        sql = 'select count(*) from %s where %s=%%s' % (table_name, unique_id_name)
        self.cursor.execute(sql, (unique_id_value,))
        return self.cursor.fetchone()[0] >= 1

    @staticmethod
    def __build_insert_sql(table_name, fields):
        """Build ``insert into <table> (f1, f2) values (%s, %s)`` for *fields*.

        Uses ``join`` so an empty field list yields empty clauses instead of
        the malformed SQL the old ``'%s, ' * (n - 1)`` arithmetic produced.
        """
        placeholders = ', '.join(['%s'] * len(fields))
        return 'insert into %s (%s) values (%s)' % (
            table_name, ', '.join(fields), placeholders)

    def __get_spider_db_parameters(self, site_name):
        """Fetch spider configuration and table layouts from the database.

        :param site_name: site prefix; resolves the ``<site_name>Spider`` row
            in `spiders` and the column lists of ``<site_name>Index`` /
            ``<site_name>Detail`` via INFORMATION_SCHEMA.
        :return: dict of spider parameters
        """
        self.cursor.execute(
            "select * from spiders where spider_name = %s",
            (site_name + 'Spider',)
        )
        result = self.cursor.fetchone()

        # Same query serves both tables; only the table name differs.
        columns_sql = "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE " \
                      "TABLE_SCHEMA = 'ScrapyData' AND TABLE_NAME = %s"
        self.cursor.execute(columns_sql, (site_name + 'Detail',))
        detail_columns_result = self.cursor.fetchall()

        self.cursor.execute(columns_sql, (site_name + 'Index',))
        index_columns_result = self.cursor.fetchall()

        return {
            'spider_name': result[0],
            'allowed_domains': result[1],
            'unique_key': result[2],
            'homepage_url': result[3],
            'index_page_patten': result[4],
            'detail_page_patten': result[5],
            'cookies': result[6],
            'user': result[7],
            'password': result[8],
            'memo': result[9],
            'index_table_fields': [x[0] for x in index_columns_result],
            'detail_table_fields': [x[0] for x in detail_columns_result]
        }

    def get_spider_parameters(self, site_name, crawl_count=100):
        """Assemble the full runtime parameter dict for one site's spider.

        Adds the index/detail INSERT templates, the SELECT for not-yet-crawled
        url_ids, and the resulting detail start-URL list.

        :param site_name: site prefix used to derive table names
        :param crawl_count: max number of detail pages to schedule
        :return: dict of spider parameters (see ``__get_spider_db_parameters``)
        """
        spider_parameters = self.__get_spider_db_parameters(site_name)

        index_table_name = site_name + 'Index'
        detail_table_name = site_name + 'Detail'
        spider_parameters['index_insert_sql'] = self.__build_insert_sql(
            index_table_name, spider_parameters['index_table_fields'])
        spider_parameters['detail_insert_sql'] = self.__build_insert_sql(
            detail_table_name, spider_parameters['detail_table_fields'])

        # url_ids already indexed but not yet present in the detail table.
        # (int() guards the LIMIT clause since identifiers/limits can't be
        # bound as DB-API parameters.)
        detail_select_sql = (
            'select url_id from %s where url_id not in (select url_id from %s) LIMIT %d'
            % (index_table_name, detail_table_name, int(crawl_count))
        )
        # The old commented-out comprehension failed because it referenced a
        # module-level `mydb` instead of `self`; with `self` it works fine.
        detail_page_patten = spider_parameters['detail_page_patten']
        detail_start_urls = [detail_page_patten % url_id
                             for url_id in self.get_url_ids(detail_select_sql)]

        spider_parameters['detail_select_sql'] = detail_select_sql
        spider_parameters['detail_start_urls'] = detail_start_urls

        return spider_parameters

    def get_xpath(self):
        """Return every row of the `xpaths` table."""
        self.cursor.execute("select * from xpaths")
        return self.cursor.fetchall()

    def save_xpath(self, domain, url, xpath, result):
        """Best-effort insert of one xpath record.

        Values are bound as parameters (previously string-formatted, which
        broke on quotes and was injectable).  Failures are rolled back and
        swallowed to keep the original never-raise contract, but the
        connection is left in a usable state.
        """
        sql = "insert into xpaths (domain,url,xpath,result) values (%s,%s,%s,%s)"
        try:
            self.cursor.execute(sql, (domain, url, xpath, result))
            self.connect.commit()
        except Exception:
            # Keep the transaction clean so later operations don't inherit
            # an aborted state.
            self.connect.rollback()

    def insert_data_into_table(self, table_name, data_dict):
        """Insert one row into *table_name*.

        :param table_name: target table (trusted identifier, interpolated)
        :param data_dict: mapping of column name -> value; values are bound
            as DB-API parameters
        """
        columns = ', '.join(data_dict.keys())
        placeholders = ', '.join(['%s'] * len(data_dict))
        sql = f'INSERT INTO {table_name} ({columns}) VALUES ({placeholders})'

        self.cursor.execute(sql, list(data_dict.values()))
        self.connect.commit()
