import sys
# Make the project's `utils` package importable.  NOTE: the old code appended
# the login.py *file* path, which sys.path silently ignores — path entries
# must be directories (or zip archives) — so append the project root instead.
sys.path.append("/Users/lidong/Desktop/zhouyu/project/crawl-lidong")
import time
import pymysql
import pandas
import re
from numpy import tile  # unused in this chunk; kept — other code may rely on it
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from datetime import datetime
from urllib.parse import quote_plus
from utils.login import login_58
# from crawlab import save_item

# MySQL connection settings.
# NOTE(review): credentials are hard-coded — consider env vars / config file.
settings = { 'host':'172.16.10.201','user':'zy001','port':3306,'password':'zy@123','database':'crawl-bk','charset' : 'utf8'}
conn = pymysql.connect(**settings)
print('##################################################数据库连接成功##################################################')
cursor = conn.cursor()
print('##################################################获取游标成功##################################################')

# INSERT used by get_list_url() to persist every scraped detail-page URL.
sql = "insert into 58_rent_all(url,list_url,region,types,create_time) values(%s,%s,%s,%s,%s)"


# Entry URLs of the 58.com rental listing pages, keyed by district name
# (Kunming districts first, then Dali counties).
area_url = {
    '五华':'https://km.58.com/wuhua/chuzu/?PGTID=0d3090a7-0021-d839-cbfb-5823d02c563c&ClickID=2',
    '盘龙': 'https://km.58.com/panlong/chuzu/?PGTID=0d3090a7-0022-01a6-2bcb-ecce1256f609&ClickID=2',
    '官渡':'https://km.58.com/guandu/chuzu/?PGTID=0d3090a7-0021-f83e-a34c-2d7eb841c8bb&ClickID=2',
    '西山':'https://km.58.com/xishan/chuzu/?PGTID=0d3090a7-0021-e0c4-ac77-258efcbd9e36&ClickID=2',
    '呈贡':'https://km.58.com/chenggong/chuzu/?PGTID=0d3090a7-0022-16e8-f95a-9d9e5ba055e6&ClickID=2',
    '大理市':'https://dali.58.com/dalishi/chuzu/?PGTID=0d3090a7-072c-a970-7abd-e394784c8b7a&ClickID=1',
    '祥云县':'https://dali.58.com/xiangyunxian/chuzu/?PGTID=0d3090a7-01cc-cc80-8cc5-412b62c785a9&ClickID=3',
    '弥渡县':'https://dali.58.com/miduxian/chuzu/?PGTID=0d3090a7-01cc-ebc1-77e2-4c2931385bac&ClickID=1',
    '宾川县':'https://dali.58.com/binchuanxian/chuzu/?PGTID=0d3090a7-01cd-08a1-0361-9b4167c73f61&ClickID=1',
    '巍山县':'https://dali.58.com/weishanzizhi/chuzu/?PGTID=0d3090a7-01cc-fcf9-2de9-c869f03b558e&ClickID=1',
    '鹤庆县':'https://dali.58.com/heqingxian/chuzu/?PGTID=0d3090a7-01cd-24e3-753b-648fd4711111&ClickID=1',
    '洱源县':'https://dali.58.com/eryuanxian/chuzu/?PGTID=0d3090a7-01cd-7b14-3a0d-35a8d428c60d&ClickID=1',
    '南涧县':'https://dali.58.com/nanjianzizhi/chuzu/?PGTID=0d3090a7-01cd-576c-a9ed-030453334428&ClickID=1',
    '剑川县':'https://dali.58.com/jianchuanxian/chuzu/?PGTID=0d3090a7-01cd-174c-f548-166d399ad58e&ClickID=1',
    '永平县':'https://dali.58.com/yongpingxian/chuzu/?PGTID=0d3090a7-01cc-dffb-cbc9-84cb17bb05f9&ClickID=1',
}

# Redirect route — sample ad ("brand apartment") URL the crawler can be bounced to.
# Not referenced by the functions below; kept for reference.
redirect  = '''https://km.58.com/pinpaigongyu/42056184631314x.shtml?adtype=1&ClickID=2&slotid=1000856&productid=10006&tid=f265c81a-c26f-46cb-b8c0-fe859027fb20&extParam=%7B%22ppgy_stats%22%3A%7B%22pageSource%22%3A%22%22%2C%22resource%22%3A%2258%22%2C%22abVersion%22%3A%22%22%2C%22launchid%22%3A%22%22%7D%7D&bizresource=0&PGTID=0d3090a7-0021-f553-48cc-e58a3f6d84de'''
# Sample anti-bot verification callback URL (also unreferenced below).
url = 'https://callback.58.com/antibot/verifylogin?serialId=221134f49fdf37e0f30afb5aaf81c35a_b76bfa9c875649eb9c03d8dbad2ac83e&code=-5&sign=db93660a8d49208edd5e789f9c0a5df4&namespace=zufanglistphp&url=https%3A%2F%2Fwuhua.58.com%2Fchuzu%2F%3FPGTID%3D0d3090a7-0021-d839-cbfb-5823d02c563c%26ClickID%3D2&platform=pc'

# Fetch the listing-page links for every district.
def get_list_url(area_url,types):
    """Walk each district's paginated listing pages and store detail URLs.

    Args:
        area_url: mapping of district name -> listing-page entry URL.
        types: house-type label stored with every row (e.g. '住宅').

    Returns:
        A status message naming the last district processed.

    Side effects: inserts rows into 58_rent_all through the module-level
    cursor/conn; launches (and closes) its own Chrome instance.
    NOTE(review): relies on the global `option` defined in __main__ — confirm.
    """
    print('##################################################获取列表页链接开始##################################################')
    driver = webdriver.Chrome(executable_path='chromedriver', options=option)
    region = ''  # fixed NameError at return when area_url is empty
    for region, entry in area_url.items():
        driver.get(entry)
        time.sleep(3)
        # Read the total page count from the pager widget.  (The old
        # `while not page_total` retry loop was dead code: find_element_by_xpath
        # raises on a missing element, it never returns a falsy value.)
        try:
            page_total = driver.find_element_by_xpath('//*[@id="pager_wrap"]/div/a[last()]').text
            if '下一页' in page_total:
                # Last anchor is the "next page" button; the count precedes it.
                page_total = driver.find_element_by_xpath('//*[@id="pager_wrap"]/div/a[last()-1]').text
        except Exception:
            # No pager widget: a single page of results.
            page_total = 1

        # Initialised here so the except handler below can never hit an
        # unbound name (the old code crashed with NameError when the very
        # first find_elements call failed).
        list_url = entry
        for _ in range(int(page_total)):
            try:
                anchors = driver.find_elements_by_xpath('//ul[@class="house-list"]/li/div[@class="des"]/h2/a')
                list_url = driver.current_url
                rows = [(a.get_attribute('href'), list_url, region, types, datetime.now())
                        for a in anchors]
                cursor.executemany(sql, rows)
                conn.commit()
                # Advance to the next result page.
                driver.find_element_by_xpath('//*[@id="pager_wrap"]/div/a[last()]').click()
            except Exception:
                # Probably an anti-bot redirect: return to the listing page,
                # wait, reload, then try the next page.
                if list_url != driver.current_url:
                    driver.get(list_url)
                    time.sleep(20)
                    driver.get(list_url)
                continue

    driver.close()
    print('##################################################获取列表页链接结束##################################################')
    return '##################################################{region}页面连接抓取成功##################################################'.format(region=region)


# Deduplicate rows and write them into a new table.
def remove_duplicate(s_table,a_table):
    """Copy `s_table` into `a_table`, dropping rows with a duplicate `url`.

    Args:
        s_table: source table name (trusted — interpolated into SQL directly).
        a_table: destination table name (appended to if it already exists).

    Returns:
        A status message naming both tables.

    NOTE(review): reads via the module-level `conn` (database 'crawl-bk') but
    writes through a hard-coded engine URL pointing at database 'crawl' —
    confirm the mismatch is intentional.
    """
    print('##################################################去重并生成新表##################################################')
    frame = pandas.read_sql("select * from  " + s_table, conn)
    deduped = frame.drop_duplicates(subset=["url"], keep="first")
    # DataFrame.to_sql is the supported API; pandas.io.sql.to_sql is a
    # private/deprecated entry point.  Same arguments, same behaviour.
    engine_url = 'mysql+pymysql://zy001:' + quote_plus("zy@123") + '@172.16.10.201:3306/crawl?charset=utf8mb4'
    deduped.to_sql(a_table, con=engine_url, index=False, if_exists='append')
    info = '##################################################{s_table}表数据已经去重并生成新表:{a_table}##################################################'.format(s_table=s_table,a_table=a_table )
    print(info)
    return info
    

def save_datail_date_1(url,driver,table):
    """Unfinished variant of save_datail_date for the obfuscated-font page.

    Builds a dict of the page's key/value attributes (from the
    `house-info-list` markup) and maps 58.com's font-obfuscated glyphs back
    to digits.  The decoded dict is currently discarded — nothing is written
    to `table` yet.
    TODO(review): persist `d` or delete this helper.
    """
    # Obfuscated glyph -> real digit.
    glyph_to_digit = {'驋':1,'閏':2,'XA':3,'xa':4,'鑶':5,'餼':6,'xs':7,'XAx':8,'xax':9,'龤':0}
    d = {}
    for item in driver.find_elements_by_xpath('//ul[@class="house-info-list"]/li'):
        key = item.find_element_by_xpath('./i').text
        d[key] = item.find_element_by_xpath('./span').text

    # De-obfuscate the values.  NOTE(review): each replacement starts from
    # the ORIGINAL value, so when several glyphs occur the last matching
    # glyph wins instead of the substitutions composing — behaviour is
    # preserved as-is pending confirmation of the intended semantics.
    for key, value in d.items():
        for glyph, digit in glyph_to_digit.items():
            if glyph in value:
                d[key] = value.replace(glyph, str(digit))


def save_datail_date(url,driver,table):
    """Parse one rental detail page and UPDATE the matching row in `table`.

    Reads the price block and the key/value attribute list, splits the packed
    fields (house type / area / fitment, orientation / floor), then writes the
    columns back keyed on `url`.  On any parsing failure the exception text is
    recorded on the row so get_datail_date() skips it on the next run.

    Returns '404' when the driver landed on a 404 page, otherwise None.
    """
    # Redirected straight to 58.com's 404 host — nothing to parse.
    if 'https://404' in driver.current_url:
        print(404)
        return '404'
    # Page label -> column name.  NOTE(review): '租凭方式' looks like a typo
    # for '租赁方式', but it must match the site's label text — confirm first.
    dic = {'租凭方式': 'rental_type', '房屋类型': 'house_type', '朝向楼层': 'orientation', '所在小区': 'community', '所属区域': 'region',
           '详细地址': 'location'}
    d = {}
    try:
        list_s = driver.find_elements_by_xpath('//ul[@class="f14"]/li')
        # Price block: "<amount> <unit>", e.g. "1500 元/月".
        try:
            price = driver.find_element_by_xpath('//div[@class="house-pay-way f16"]/span[1]').text
            rental_expenses = price.split(' ')
            if len(rental_expenses) > 1:
                d['rental_expenses'] = rental_expenses[0]
                d['unit_of_expenses'] = rental_expenses[1]
        except Exception:
            pass

        for li in list_s:
            k = li.find_element_by_xpath('./span[1]').text[:-1]  # drop trailing colon
            v = li.find_element_by_xpath('./span[2]').text
            if dic.get(k):
                d[dic.get(k)] = v
        # "house_type" packs type / area / fitment separated by double spaces.
        # (Raises AttributeError when absent; caught below and recorded.)
        house_type = d.get('house_type').split('  ')
        if len(house_type) > 2:
            d['house_type'] = house_type[0]
            d['area'] = house_type[1]
            d['fitment'] = house_type[2]
        # "orientation" packs orientation / floor info the same way.
        orientation = d.get('orientation').split('  ')
        if len(orientation) > 1:
            d['orientation'] = orientation[0]
            d['layer_info'] = orientation[1]
        if d.get('area'):
            num = re.findall(r'\d+', d.get('area'))
            if len(num) > 0:
                d['area'] = num[0]
        d['unit_of_area'] = '㎡'

        # Parameterised UPDATE: the previous str.format() version produced
        # broken (and injectable) SQL whenever a value contained a double
        # quote.  Values are str()-wrapped so missing fields are stored as the
        # literal 'None', exactly as the old string formatting did.  The table
        # name cannot be a placeholder, so it is still formatted in.
        update_sql = (
            'UPDATE {table} set house_type =%s,orientation = %s,'
            'community = %s,region = %s,location = %s, area=%s,'
            'fitment=%s,layer_info=%s,unit_of_area = %s,'
            'rental_expenses=%s,unit_of_expenses=%s '
            'where url =%s'
        ).format(table=table)
        params = (
            str(d.get('house_type')),
            str(d.get('orientation')),
            str(d.get('community')),
            str(d.get('region')),
            str(d.get('location')),
            str(d.get('area')),
            str(d.get('fitment')),
            str(d.get('layer_info')),
            str(d.get('unit_of_area')),
            str(d.get('rental_expenses')),
            str(d.get('unit_of_expenses')),
            url,
        )
        cursor.execute(update_sql, params)
        conn.commit()
        print('success')
    except Exception as e:
        # Record the failure on the row so the crawler skips it next run.
        cursor.execute('UPDATE 58_rent_all  set exception =%s where url =%s', (str(e), url))
        print(e)
        time.sleep(10)
        conn.commit()

# Heading 58.com shows on its "page not found" error page.
NOT_FOUND_TEXT = '你要找的页面不在这个星球上！'


def _handle_loaded_page(url, driver, table):
    """Dispatch one already-loaded detail page.

    Marks the row when the 404 heading is present, skips brand-apartment
    ('pinpaigongyu') redirects, otherwise parses/stores via save_datail_date.
    """
    try:
        heading = driver.find_element_by_xpath('/html/body/div[2]/div/h1').text
    except Exception:
        heading = None
    if heading == NOT_FOUND_TEXT:
        # Record the 404 so the row is excluded by the next SELECT.
        cursor.execute('UPDATE 58_rent_all  set exception = %s where url =%s', (heading, url))
        conn.commit()
        return
    if 'pinpaigongyu' in driver.current_url:
        return
    save_datail_date(url, driver, table)


def get_datail_date(table, driver):
    """Fetch every unprocessed detail URL from 58_rent_all and store its data.

    Selects rows of type '住宅' with no community yet and no recorded
    exception, newest first.  On navigation errors it retries once after a
    pause; if the browser is genuinely stuck on the URL, the error text is
    recorded so the row is skipped next run.
    """
    print('##################################################开始获取并存储详细信息##################################################')
    sql = 'select url from 58_rent_all where types="住宅" and  community is  NULL and exception is null order by id desc'
    cursor.execute(sql)
    data = cursor.fetchall()
    for row in data:
        url = row[0]
        try:
            driver.get(url)
            # The old code only reached the parse path by accident: a
            # reference to the nonexistent `driver.cu` raised AttributeError
            # into the handler that called save_datail_date.  Made explicit.
            _handle_loaded_page(url, driver, table)
        except Exception as e:
            if driver.current_url != url:
                # Likely an anti-bot redirect: wait, reload, retry once.
                time.sleep(30)
                driver.get(url)
                _handle_loaded_page(url, driver, table)
            else:
                # Mark the failing URL so it is skipped, then move on.
                cursor.execute('UPDATE 58_rent_all  set exception = %s where url =%s', (str(e), url))
                print('错误')
                conn.commit()
                driver.get(url)

    info= '##################################################{table}表详情数据已经完成获取并完成存储##################################################'.format(table=table )
    return info



if __name__ == '__main__':
    option = ChromeOptions()
    # Chrome profile prefs: value 2 disables image loading to speed up crawling.
    prefs = {
        'profile.default_content_setting_values': {
            'images': 2
        }
    }
    # option.headless = True
    option.add_experimental_option('prefs', prefs)
    # Drop the "controlled by automated software" switch to reduce bot detection.
    option.add_experimental_option('excludeSwitches', ['enable-automation'])
    option.add_argument('--ignore-certificate-errors')
    driver = webdriver.Chrome(executable_path='/Users/lidong/Desktop/zhouyu/project/crawl-lidong/crawlab_env/bin/chromedriver', options=option)
    # Log in first so detail pages are accessible to the crawler.
    driver = login_58(driver)
    # print('##################################################浏览器加载成功##################################################')
    # get_list_url(area_url,types='住宅')
    # remove_duplicate(s_table='58_rent_all',a_table = '58_rent_all_total')
    get_datail_date(table='58_rent_all', driver=driver)
    # conn.close()
    # cursor.close()
