import urllib.request
import json
from dao.pymysql_utils import pymysql_util
# https://s.taobao.com/api?m=customized&q=%E5%88%9D%E9%9F%B3%E6%9C%AA%E6%9D%A5&cat=50016564&s=0
# https://s.taobao.com/search?q=%E5%88%9D%E9%9F%B3%E6%9C%AA%E6%9D%A5&cat=50016564&s=0
class mikuGK_spider:
    """Spider for Hatsune Miku garage-kit listings on Taobao's customized
    search API.

    Workflow: ``get_one_page_message`` downloads one result page (12 items
    per page, offset ``self.page``), archives the raw JSON to disk, parses
    the listing fields into in-memory accumulator lists, and
    ``save_in_mysql`` finally bulk-inserts everything into the ``mikugk``
    table.
    """

    # Fields extracted from each auction entry, in insert-column order.
    _FIELDS = ("raw_title", "detail_url", "view_price", "view_fee",
               "item_loc", "view_sales", "comment_count", "nick")

    def __init__(self):
        # Base API url; the trailing "s=" takes the result offset.
        # (q=初音未来 url-encoded, cat is the garage-kit category.)
        self.url = "https://s.taobao.com/api?m=customized&q=%E5%88%9D%E9%9F%B3%E6%9C%AA%E6%9D%A5&cat=50016564&s="
        self.page = 0  # current result offset; driver bumps it by 12 per page
        self.User_Agent = {
            "User-Agent": "Mozilla/5.0(Windows;U;WindowsNT6.1;en-us)AppleWebKit/534.50(KHTML,likeGecko)Version/5.1Safari/534.50"
        }
        self.raw_titles = []      # raw_title: item titles
        self.detail_urls = []     # detail_url: item detail-page urls
        self.view_prices = []     # view_price: prices
        self.view_fees = []       # view_fee: shipping fees
        self.item_locs = []       # item_loc: ship-from location
        self.view_saless = []     # view_sales: number of buyers
        self.comment_counts = []  # comment_count: review counts
        self.nicks = []           # nick: seller names

    def get_html(self):
        """Download the page at the current offset, archive and parse it.

        Sets ``self.html_dict`` (parsed JSON) and ``self.html_len`` (raw
        response length; the driver uses it to detect the empty last page).
        """
        full_url = self.url + str(self.page)
        request = urllib.request.Request(full_url, headers=self.User_Agent)
        # Context manager ensures the HTTP connection is always released
        # (the original leaked the response object).
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
        self.save_full_data(html)          # archive the raw page locally
        self.html_dict = json.loads(html)  # JSON string -> python dict
        self.html_len = len(html)          # raw length of the page

    def save_full_data(self, html):
        """Write the raw JSON of the current page to ../data/fulldata/."""
        path = "../data/fulldata/%s~%s.json" % (self.page, self.page + 12)
        with open(path, mode="w", encoding="utf-8") as f:
            f.write(html)
            print("将所有数据存入本地文件")

    def get_one_page_message(self):
        """Fetch one page and append its listings to the accumulators.

        Iterates over however many auctions the page actually carries
        (the last page may hold fewer than 12), so a short page is kept
        instead of being discarded.  A page whose JSON lacks the expected
        structure is skipped whole, leaving earlier pages intact.
        """
        self.get_html()  # refresh self.html_dict / self.html_len
        try:
            auctions = self.html_dict["API.CustomizedApi"]["itemlist"]["auctions"]
            # Stage into local lists first so a malformed item mid-page
            # cannot leave a partially-appended page in the accumulators.
            columns = [[item[field] for item in auctions]
                       for field in self._FIELDS]
        except (KeyError, TypeError):
            # Page carries no recognizable listings (e.g. past the end,
            # or an anti-bot response) -- skip it, keep what we have.
            return

        self.raw_titles += columns[0]
        self.detail_urls += columns[1]
        self.view_prices += columns[2]
        self.view_fees += columns[3]
        self.item_locs += columns[4]
        self.view_saless += columns[5]
        self.comment_counts += columns[6]
        self.nicks += columns[7]

    def save_in_mysql(self):
        """Insert every accumulated listing into the ``mikugk`` table.

        Uses a parameterized statement (placeholders, not string
        formatting) so titles/nicks cannot break or inject into the SQL.
        """
        m_utils = pymysql_util("localhost", "root", "root", 3306, "carry")
        # Statement is loop-invariant: build it once.
        sql = "insert into mikugk(raw_title,detail_url,view_price,view_fee,item_loc,view_sales,comment_count,nick) values(%s,%s,%s,%s,%s,%s,%s,%s)"
        rows = zip(self.raw_titles, self.detail_urls, self.view_prices,
                   self.view_fees, self.item_locs, self.view_saless,
                   self.comment_counts, self.nicks)
        for row in rows:
            m_utils.insert(sql, row)
        print("将数据存入数据库")

if __name__ == "__main__":
    spider = mikuGK_spider()  # build the spider
    page_no = 0
    more_pages = True
    while more_pages:
        page_no += 1
        print(f"爬取第{page_no}页")       # progress: which page we are on
        spider.get_one_page_message()     # scrape one page of listings
        spider.page += 12                 # advance the result offset
        # A near-empty response (< 10 bytes) marks the end of the results.
        more_pages = spider.html_len >= 10
    spider.save_in_mysql()  # persist everything scraped




