from scrapyProject.spiders.DataBase import  databaseCore
# House listings are never re-matched against existing rows: every crawl
# captures the latest data, so each record is simply stored as new.
class fangDataBase(databaseCore):
    """Database layer for the fang spider.

    Persists city catalog rows and scraped house listings through the
    generic InsertOrUpdateData/GetData primitives inherited from
    databaseCore.
    """

    def insertAndUpdateCatalog(self, CityName, ProvinceName, urlNewHouse, urlSencondHandHouse, urlRentalhousing):
        """Upsert one city catalog row and return its id.

        The id is derived from province+city via getID, so re-running a
        crawl updates the existing row instead of inserting a duplicate.
        """
        catalog_id = self.getID(ProvinceName + CityName)
        columns = ["id", "CityName", "ProvinceName", "urlNewHouse", "urlSencondHandHouse", "urlRentalhousing"]
        values = [catalog_id, CityName, ProvinceName, urlNewHouse, urlSencondHandHouse, urlRentalhousing]
        self.InsertOrUpdateData(tableName="fangcatalog",
                                valueForSet=values,
                                colForSet=columns,
                                valueForCompare=[catalog_id],
                                colForCompare=["id"],
                                compareRule=["="])
        return catalog_id

    def insertAnddUpdateHouseInfo(self, name, address, telephone, housetype, characteristic, collecttime, idCatalog, price, type):
        """Store one scraped house listing.

        The compare lists are deliberately empty: listings are never
        matched against existing rows, since each crawl captures the
        latest data (see module comment).
        """
        columns = ["name", "address", "telephone", "housetype", "characteristic", "collecttime",
                   "idCatalog", "type", "price"]
        values = [name, address, telephone, housetype, characteristic, collecttime, idCatalog, type, price]
        self.InsertOrUpdateData(tableName="fanghouseinfo",
                                valueForSet=values,
                                colForSet=columns,
                                valueForCompare=[],
                                colForCompare=[],
                                compareRule=[])

    def getNeedDownUrls(self):
        """Yield (id, urlNewHouse, urlSencondHandHouse, urlRentalhousing) for each catalog row."""
        fields = ["id", "urlNewHouse", "urlSencondHandHouse", "urlRentalhousing"]
        rows = self.GetData(tableName="fangcatalog", serchInx=fields, colName="", compareValue="")
        for row in rows:
            yield tuple(row[field] for field in fields)
