# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import sqlite3

class VisualizationPipeline:
    """Scrapy item pipeline that persists scraped city-weather items to SQLite.

    Opens one connection when the spider starts, inserts each item into the
    ``weathers`` table, and commits/closes once when the spider finishes
    (writes are batched into a single transaction for throughput).
    """

    # Columns written for every item, in INSERT order.
    FIELDS = ('name', 'date', 'high_temp', 'low_temp', 'weather_desc')

    # TODO(review): hard-coded absolute Windows path — consider moving this
    # into the Scrapy project settings so other machines can run the spider.
    DB_PATH = 'E:/Procedure/Python/Assignment/Visualization/city_weather.db'

    def open_spider(self, spider):
        """Open the SQLite connection and create the ``weathers`` table if missing."""
        self.db = sqlite3.connect(self.DB_PATH)
        self.cursor = self.db.cursor()
        self.cursor.execute('''
                    CREATE TABLE IF NOT EXISTS weathers (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        name TEXT,
                        date TEXT,
                        high_temp TEXT,
                        low_temp TEXT,
                        weather_desc TEXT
                    )
                ''')
        self.db.commit()

    def close_spider(self, spider):
        """Flush all pending inserts and close the connection."""
        self.db.commit()
        self.db.close()

    def process_item(self, item, spider):
        """Insert one scraped item into ``weathers`` and return it unchanged.

        Missing or ``None`` field values are normalized to empty strings.
        (The original mixed ``get(k, '')`` with ``get(k) or ''``; the former
        let an explicit ``None`` value through as SQL NULL for ``high_temp``
        and ``weather_desc`` — all five fields now behave the same way.)
        """
        values = tuple(item.get(field) or '' for field in self.FIELDS)
        self.cursor.execute(
            'INSERT INTO weathers (name, date, high_temp, low_temp, weather_desc)'
            ' VALUES (?, ?, ?, ?, ?)',
            values,
        )
        # Commit is deferred to close_spider to batch writes; uncomment for
        # per-item durability at the cost of throughput.
        # self.db.commit()
        return item
"""
class DbPipeline:
    def __init__(self):
        self.conn=pymysql.connect(host='10.7.190.76',port=3306,
                                  user='wangdachui',password='Wang.618',
                                  database='spider',charset='utf8mb4')
        self.cursor=self.conn.cursor
    def close_spider(self):
        self.conn.close()
    def process_item(self,item,spider):
        pass
"""