# Python data basics, lesson 11: collecting data from web pages and storing it in a DataFrame (part 2)
"""
Python data basics, lesson 11: collecting data from web pages and storing it
in a DataFrame (part 2).

Match up the scraped values and store them in the corresponding DataFrames:
read each product's ID, then fetch that product's price and attribute table.
"""

import json
import urllib.request

from pandas import Series
from pandas import DataFrame
from pandas import read_csv
from bs4 import BeautifulSoup

# Load the product IDs collected in part one of this series.
# A raw string is used for the Windows path: the original mixed "\\" with a
# bare "\p", which is an invalid escape sequence (SyntaxWarning on Python
# 3.12+ and a latent bug if the segment ever starts with a real escape char).
pids = read_csv(r"E:\Python\pyspark_demo01\out_data\pids.csv")

# Shared column definitions so the DataFrames and the row inserts below
# stay in sync without repeating the literals.
pColumns = ['PID', 'Price']   # product id -> price
fColumns = ['PID', 'Feature', 'Property']  # product id -> attribute name/value

# Empty DataFrames that accumulate the price rows and the attribute rows.
pData = DataFrame(columns=pColumns)
fData = DataFrame(columns=fColumns)
# 开始遍历我们需要抓取的商品数据
# Iterate over every product ID and scrape its price and its attribute table.
for pid in pids.values:
    # pid is a one-element row array from pids.values; plain str() is enough
    # (the original's .astype(str) relied on it being a numpy scalar).
    PID = str(pid[0])

    # JD's price API returns a JSON list like
    # [{"id": "J_<pid>", "p": "<price>", ...}].
    pUrl = 'http://p.3.cn/prices/get?skuid=J_' + PID
    print("开始处理价格数据：" + pUrl)

    # Context manager closes the HTTP connection (the original leaked it).
    with urllib.request.urlopen(pUrl) as response:
        jsonObject = json.loads(response.read().decode())

    Price = float(jsonObject[0]['p'])
    # DataFrame.append() was removed in pandas 2.0; positional .loc assignment
    # produces the same 0..n-1 integer index that ignore_index=True did.
    pData.loc[len(pData)] = [PID, Price]

    fUrl = "http://item.jd.com/" + PID + ".html"
    print("开始处理属性数据：" + fUrl)
    with urllib.request.urlopen(fUrl) as response:
        html = response.read()

    # Name the parser explicitly: BeautifulSoup(html) emits
    # GuessedAtParserWarning and may pick different parsers per machine.
    soup = BeautifulSoup(html, "html.parser")

    divSoup = soup.find(id="product-detail-2")
    if divSoup is None:
        # Page layout changed or product removed — skip this product
        # instead of crashing on None.find_all below.
        continue

    # Each <tr> with exactly two <td> cells is one "feature: property" pair.
    for tr in divSoup.find_all('tr'):
        tds = tr.find_all('td')
        if len(tds) == 2:
            fData.loc[len(fData)] = [PID, tds[0].getText(), tds[1].getText()]

print(pData)
print(fData)