from selenium import webdriver
from selenium.webdriver.common.by import By
from lxml import etree
import pandas as pd
import os
from multiprocessing import Process
from Utils import Data_Processing

class Anjuke_Spider:
    """Scrape second-hand housing listings from Anjuke (Shijiazhuang) using a
    Selenium-driven Edge browser, paging through each configured district and
    persisting every page of results via the module-level save_data()."""

    def __init__(self):
        # NOTE(review): `executable_path` was removed in Selenium 4; this
        # works on Selenium 3.x — confirm the pinned selenium version.
        self.browser = webdriver.Edge(executable_path='./msedgedriver.exe')
        self.url = 'https://sjz.anjuke.com/sale/'
        # District URL slugs; one crawl pass per district.
        self.sjz_area = ['changana', 'qiaoxi', 'yuhuaf', 'xinhuaf', 'kfqsjz']
        # Wait up to 3 seconds before reporting an element as missing.
        self.browser.implicitly_wait(3)

    def __scroll_down_page(self, speed=8):
        """Scroll to the bottom of the page in small increments so that
        lazy-loaded listing content is rendered before parsing.

        :param speed: pixels scrolled per step.
        """
        current_scroll_position, new_height = 0, 1
        while current_scroll_position <= new_height:
            current_scroll_position += speed
            self.browser.execute_script("window.scrollTo(0, {});".format(current_scroll_position))
            new_height = self.browser.execute_script("return document.body.scrollHeight")

    @staticmethod
    def __first_text(node, xpath, default=''):
        """Return the first result of `xpath` under `node`, or `default` if
        the expression matches nothing. Evaluates the XPath only once (the
        original code ran each optional XPath twice: once to check, once to
        read) and avoids IndexError on malformed listing cards."""
        hits = node.xpath(xpath)
        return hits[0] if hits else default

    def start(self):
        """Crawl every configured district. For each district, repeatedly
        scroll, parse the listing section, save the page via save_data(),
        and click the 'next page' button until it is disabled
        (href == 'javascript:void(0);')."""
        for sub_area in self.sjz_area:
            url = self.url + f'{sub_area}/'
            self.browser.get(url)

            while True:
                self.__scroll_down_page()
                # Grab the rendered HTML of the listing container and hand it
                # to lxml — faster than per-field Selenium element lookups.
                house_data = self.browser.find_element(
                    By.XPATH,
                    '//*[@id="esfMain"]/section/section[3]/section[1]/section[2]').get_attribute('innerHTML')
                house_data = etree.HTML(house_data)
                # etree.HTML wraps the fragment in html/body; each child node
                # is one listing card.
                house_data = house_data.xpath('/html/body/*')
                print("此页的数据大小:" + str(len(house_data)))
                house_list = []
                for house in house_data:
                    tem_data = {}
                    tem_data['href'] = self.__first_text(house, './a/@href')
                    tem_data['title'] = self.__first_text(house, './a/div[2]/div[1]/div[1]/h3/text()')
                    # NOTE(review): property_name and property_devname read the
                    # same XPath (div[2]/p[1]) — looks like one of them should
                    # point at a different node; verify against the live page.
                    tem_data['property_name'] = self.__first_text(house, './a/div[2]/div[1]/section/div[2]/p[1]/text()')
                    # string() flattens nested spans (e.g. "3室2厅 | ...") into one string.
                    tem_data['property_info'] = str(house.xpath('string(./a/div[2]/div[1]/section/div[1]/p[1]/.)'))
                    tem_data['property_area'] = self.__first_text(house, './a/div[2]/div[1]/section/div[1]/p[2]/text()')
                    tem_data['property_orientation'] = self.__first_text(house, './a/div[2]/div[1]/section/div[1]/p[3]/text()')
                    # Floor and year are optional fields on the card.
                    tem_data['property_floor'] = self.__first_text(house, './a/div[2]/div[1]/section/div[1]/p[4]/text()')
                    tem_data['property_year'] = self.__first_text(house, './a/div[2]/div[1]/section/div[1]/p[5]/text()')
                    tem_data['property_devname'] = self.__first_text(house, './a/div[2]/div[1]/section/div[2]/p[1]/text()')
                    # Location is split across child elements; join with '-'.
                    tem_loc = []
                    for item in house.xpath('./a/div[2]/div[1]/section/div[2]/p[2]/*'):
                        tem_loc += item.xpath('./text()')
                    tem_data['property_loc'] = '-'.join(tem_loc)
                    tem_data['property_price'] = str(house.xpath('string(./a/div[2]/div[2]/p[1]/.)'))
                    tem_data['property_average_price'] = self.__first_text(house, './a/div[2]/div[2]/p[2]/text()')
                    house_list.append(tem_data)
                print(house_list)
                save_data(house_list, sub_area)
                next_page_btn = self.browser.find_element(
                    By.XPATH,
                    '//*[@id="esfMain"]/section/section[3]/section[1]/section[4]/div/a[2]')
                # Anjuke disables the 'next' link by pointing it at a JS no-op.
                if next_page_btn.get_attribute('href') == 'javascript:void(0);':
                    break
                else:
                    next_page_btn.click()

def save_data(dataList, areaname='area1'):
    """Append the scraped rows to ./sjz_<areaname>.csv.

    The header row is written only on the first call (i.e. when the file does
    not exist yet); subsequent calls append data rows without a header.

    :param dataList: list of per-listing dicts (one dict per row).
    :param areaname: district slug used to name the output file.
    """
    out_path = f"./sjz_{areaname}.csv"
    header_needed = not os.path.exists(out_path)
    pd.DataFrame(dataList).to_csv(out_path, mode="a", header=header_needed)


def sub_process():
    """Run one full crawl, then post-process the scraped CSV files.

    Intended as the target of a background Process (see run()).
    """
    spider = Anjuke_Spider()
    spider.start()
    Data_Processing.clean()

def run():
    """Launch the crawl in a separate process so the caller is not blocked.

    :return: the started multiprocessing.Process, so the caller can join or
        terminate it.
    """
    worker = Process(target=sub_process)
    worker.start()
    return worker

# if __name__ == '__main__':
#     Anjuke_Spider().start()
