import csv
import os

import lxml.etree as le
import pandas as pd

# Return the single (first) result of an XPath query
import pandas as pd


def xpath_one(contentx, path, default=None):
    """Return the first result of an XPath query, or *default* when empty.

    Args:
        contentx: An lxml element/tree, or raw HTML as ``str``/``bytes``
            (parsed automatically via ``le.HTML``).
        path: XPath expression to evaluate.
        default: Value returned when the query yields no results.
    """
    # isinstance is the idiomatic (and subclass-safe) type check.
    if isinstance(contentx, (str, bytes)):
        contentx = le.HTML(contentx)
    rets = contentx.xpath(path)
    return rets[0] if rets else default


# Return all results of an XPath query
def xpath_all(contentx, path, strip=False):
    """Return all results of an XPath query as a list.

    Args:
        contentx: An lxml element/tree, or raw HTML as ``str``/``bytes``
            (parsed automatically via ``le.HTML``).
        path: XPath expression to evaluate.
        strip: When True, whitespace-strip every result; only valid when
            the query yields strings (e.g. a ``text()`` selection).
    """
    # isinstance is the idiomatic (and subclass-safe) type check.
    if isinstance(contentx, (str, bytes)):
        contentx = le.HTML(contentx)
    rets = contentx.xpath(path)
    if strip:
        # Comprehension replaces the manual append loop.
        return [ret.strip() for ret in rets]
    return rets


# Join the matched results into a single string
def xpath_union(contentx, path, sep='', strip=True, default=None):
    """Join all XPath query results into one string, or return *default*.

    Args:
        contentx: An lxml element/tree, or raw HTML as ``str``/``bytes``.
        path: XPath expression, typically selecting text nodes.
        sep: Separator placed between the joined results.
        strip: Whitespace-strip each result before joining.
        default: Returned when the query yields no results.
    """
    # xpath_all already parses str/bytes input, so the pre-parse the
    # original did here was redundant work — delegate entirely.
    rets = xpath_all(contentx=contentx, path=path, strip=strip)
    return sep.join(rets) if rets else default


# Namespace for shared XPath constants.
class P():
    # Selects the <ul> lists inside the listing page's "introduction"
    # section; extract_inner iterates their <li> rows as label/value pairs.
    ul = '//div[@id="introduction"]//div[@class="content"]/ul'


def extract_inner(html):
    """Parse one saved listing page into a flat ``{field: value}`` dict.

    Pulls the title, unit price and district, then walks the two
    attribute lists under the "introduction" section.
    """
    record = {}

    record["title"] = xpath_one(html, '//div[@class="title"]/h1/text()')
    record["价格"] = xpath_one(html, '//span[@class="unitPriceValue"]/text()')
    record["地区"] = xpath_one(html, '//div[@class="areaName"]//span[@class="info"]/a[1]/text()')

    uls = xpath_all(html, P.ul)
    # First list: the label lives in a nested <span>, the value is the
    # <li>'s own text nodes.
    if uls:
        for item in xpath_all(uls[0], './li'):
            key = xpath_one(item, './/span//text()')
            record[key] = xpath_union(item, './text()')
    # Second list: label in span[1], value in span[2].
    if len(uls) > 1:
        for item in xpath_all(uls[1], './li'):
            key = xpath_one(item, './span[1]//text()')
            record[key] = xpath_union(item, './span[2]//text()')

    return record


def extract(data_dir, target_filepath=None) -> pd.DataFrame:
    datas = []
    for fn in os.listdir(data_dir):
        if(fn.split(r".")[-1]=="csv"):
            continue
        fp = os.path.join(data_dir, fn)
        with open(fp, 'rb') as f:
            html = f.read()
        this_data = extract_inner(html)
        this_data["城市"] = data_dir.split(r"/")[-1]
        datas.append(this_data)
    df = pd.DataFrame(datas)
    if target_filepath:
        df.to_csv(target_filepath, index=False, mode='w')
    return df


if __name__ == '__main__':
    # cityUrl.csv: first column is the city slug; row 0 is the header.
    with open('./cityUrl.csv', mode="r", newline='') as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row
        for row in reader:
            extract(f'./html2/{row[0]}', target_filepath=f'./html2/result/{row[0]}.csv')
