# -*- coding: utf-8 -*-

__author__ = 'Leon'

import urllib2, re, sqlite3, os
from BeautifulSoup import BeautifulSoup

PAGE_COUNT = 32
LIST_URL = 'http://www.cloudguide.com.cn/index.php/product/index/id/18/p/{0}/'
HOST_URL = 'http://www.cloudguide.com.cn/'
DATA_BASE = ''

HERE = os.path.dirname(os.path.abspath(__file__))
DATA_FOLDER = os.path.join(HERE, 'data')
DATA_PATH = os.path.join(DATA_FOLDER, 'grape.db')


def run():
    """Crawl the cloudguide.com.cn product listing and store every product
    found into the local SQLite database (table ``spider``).

    For each listing page this extracts name / logo / caption per product,
    then fetches the product's detail page for the intro HTML, overview
    HTML, category names and owning-firm information, and inserts one row
    per product.  Commits after every insert so a crash loses at most one
    product.
    """
    # NOTE(review): range(1, PAGE_COUNT) visits pages 1..31; if the site
    # really has 32 pages the last one is skipped -- confirm intended count.
    db = sqlite3.connect(DATA_PATH)
    try:
        # Iterate over all listing pages.
        for page in range(1, PAGE_COUNT):
            listurl = LIST_URL.format(page)
            # Decode permissively and re-encode so BeautifulSoup gets clean utf-8.
            listhtml = getWebPageContent(listurl).decode('utf8', 'ignore').encode('utf8')
            soup = BeautifulSoup(listhtml)
            # The product <ul> is the sibling immediately after the "paixu" filter bar.
            litags = soup.find('div', attrs={'class': re.compile("paixu")}).findNextSibling('ul').findAll('li')
            for litag in litags:
                p_name = litag.find('img')['alt']
                p_caption_text = litag.find('div', attrs={'class': re.compile('yuncp_cphight')}).text
                p_logo_url = litag.find('img')['src']
                p_detail_url = litag.find('a', attrs={'href': re.compile(r'/product/view/type/\d+/id/\d+\.html')})['href']

                print(p_detail_url)

                # Fetch the product detail page.
                detailhtml = getWebPageContent(HOST_URL + p_detail_url).decode('utf8', 'ignore').encode('utf8')
                soup_detail = BeautifulSoup(detailhtml)
                # Feature / introduction HTML fragment.
                p_intro_html = ''.join(
                    [unicode(x) for x in soup_detail.find('div', attrs={'class': 'introo'}).contents])
                # Overview tab HTML fragment.
                p_detail_html = ''.join(
                    [unicode(x) for x in soup_detail.find('div', attrs={'id': re.compile('menutab_1_1')}).contents])
                # The "fenxiang" (share) area holds category links and the owning firm.
                soup_fenxiang = BeautifulSoup(soup_detail.find('div', attrs={'class': 'fenxiang'}).prettify())
                # Pipe-separated category names.
                p_categorys = [catetag.text for catetag in
                               soup_fenxiang.findAll('a', attrs={'href': re.compile(r'/product/index/id/\d+\.html')})]
                p_category_str = '|'.join(p_categorys)
                # Owning firm name.
                p_firm_name = soup_fenxiang.find('a', attrs={'href': re.compile(r'/provider/lists/id/\d+\.html')}).text
                # Firm homepage: strip the site's redirect wrapper to get the real URL.
                p_firm_url \
                    = soup_detail.find('a', attrs={'href': re.compile('http://www.cloudguide.com.cn/url.php.*')})['href']
                p_firm_url = p_firm_url.replace(u'http://www.cloudguide.com.cn/url.php?url=', '')

                # BUG FIX: the original VALUES clause was out of step with the
                # column list -- captionhtml was bound to :introhtml and
                # introhtml to an unsupplied :features parameter, which makes
                # sqlite3 raise ProgrammingError on every insert.
                db.execute(
                    'insert into spider '
                    '(name,logourl,captionhtml,introhtml,detailhtml,firmName,firmUrl,category,src_url,src_logourl,src_firmurl)'
                    'values (:name,:logourl,:caption,:introhtml,:detailhtml,:firmName,:firmUrl,:category,:src_url,:src_logourl,:src_firmurl) '
                    , {'name': p_name, 'logourl': p_logo_url, 'caption': p_caption_text, 'introhtml': p_intro_html,
                       'detailhtml': p_detail_html,
                       'firmName': p_firm_name, 'firmUrl': p_firm_url, 'category': p_category_str,
                       'src_url': p_detail_url, 'src_logourl': p_logo_url, 'src_firmurl': p_firm_url})
                db.commit()
    finally:
        db.close()


def import_csdn():
    """Import product data from locally saved CSDN HTML pages.

    Unimplemented stub: the path placeholders below must be pointed at the
    saved category pages before any parsing can be written.  Returns None.
    """
    # TODO: fill in the saved-page paths, then parse each file.
    # (Names are pinyin; presumably platform/storage/host/application/ISV
    # categories -- confirm against the saved pages.)
    html_path_pingtai = ""
    html_path_chunchu = ""
    html_path_zhuji = ""
    html_path_yingyong = ""
    html_path_isv = ""


def getWebPageContent(url):
    """Fetch *url* with urllib2 and return the raw response body as bytes.

    The response object is always closed, even if read() raises (the
    original leaked the connection on a read error).
    """
    f = urllib2.urlopen(url)
    try:
        return f.read()
    finally:
        f.close()


# main
# Script entry point: crawl all listing pages and populate the database.
if __name__ == '__main__':
    run()