﻿# -*- coding: utf-8 -*-
# @Author  : Feng Cheng
# @Email   : fengcheng@pku.edu.cn

# -*- coding: utf-8 -*-
from selenium import webdriver
import codecs
import time
import winsound

import re
# from lxml import html
import pdb


def getPrice(strPrice):
    """Parse a rendered price string (e.g. '¥1,234.56') into a float.

    The site renders amounts with exactly two decimal places, so keeping
    only the digit characters and dividing by 100 recovers the value
    ('¥1,234.56' -> digits '123456' -> 1234.56).

    :param strPrice: price text scraped from the page; may contain a
        currency symbol, thousands separators, and a decimal point.
    :return: the amount as a float; 0.0 when the string contains no
        digits (the original raised ValueError on int('')).
    """
    digits = ''.join(ch for ch in strPrice if ch.isdigit())
    if not digits:
        # Empty / non-numeric cell: treat as zero rather than crashing.
        return 0.0
    return int(digits) / 100


def crawler():
    """Poll the baocai.com loan-transfer listing page forever.

    For each previously unseen listing, print its current price, remaining
    revenue, rate of return, and discount ratio. When a listing's discount
    falls below 40%, open a dedicated Chrome window on it and beep.

    Side effects: creates 'thefile.csv', launches Chrome windows, prints
    progress to stdout. Never returns normally; stop with Ctrl-C.
    """
    # NOTE(review): opened but never written to or closed — kept so the
    # empty CSV is still created as before; confirm whether results were
    # meant to be saved here.
    file_output = codecs.open('thefile.csv', 'w', 'utf_8_sig')
    print("正在抓取 抱财网")
    page_url = 'https://www.baocai.com/invest/change/index.html'
    browser = webdriver.Chrome()
    browser.implicitly_wait(20)
    browser.set_page_load_timeout(20)
    browser.set_script_timeout(20)
    # Keep references to alert windows so they are not garbage-collected.
    alert_browsers = []
    # Listing URL -> 1; each listing is reported at most once.
    seen = {}

    def _open_alert_browser(target_url):
        """Open a dedicated Chrome window on a bargain listing."""
        alert = webdriver.Chrome()
        alert.implicitly_wait(20)
        alert.set_page_load_timeout(20)
        alert.set_script_timeout(20)
        alert_browsers.append(alert)
        try:
            alert.get(target_url)
        except Exception:  # narrowed from bare except (page-load timeout etc.)
            print('timeout')

    try:
        browser.get(page_url)
    except Exception:  # narrowed from bare except
        print('timeout')

    k = 1
    while True:
        print("刷新第" + str(k) + "遍")
        time.sleep(1)
        # NOTE(review): find_elements_by_xpath is removed in Selenium 4;
        # migrate to find_elements(By.XPATH, ...) when upgrading.
        price = browser.find_elements_by_xpath('//div[@class="list_detail"]/ul/li/div/span[@style="color:#ff8142;"]')
        urls = browser.find_elements_by_xpath('//div[@class="item_right"]/div[@class="list_bottom"]/div/a')
        # The listing page shows 8 items; spans alternate price / revenue.
        for x in range(8):
            priceNow = getPrice(price[x * 2].text)
            revenue = getPrice(price[x * 2 + 1].text)
            item_url = urls[x].get_attribute('href')
            total = priceNow + revenue
            if priceNow == 0 or total == 0:
                # Guard the divisions below (original raised
                # ZeroDivisionError on listings that parsed to zero).
                continue
            discount = priceNow / total * 100
            if item_url in seen:
                print("该标的已存在")
                continue
            seen[item_url] = 1
            print(item_url)
            print('Now Price:', priceNow, '\t Revenue:', revenue, '\t  Rate of return:', revenue / priceNow * 100,
                  '%\t rate discount:', discount, '%')
            if discount < 40:
                _open_alert_browser(item_url)
                # Windows-only audible alert: 800 Hz for 10 seconds.
                winsound.Beep(800, 10000)
        try:
            browser.refresh()
        except Exception:  # narrowed from bare except
            print('timeout')
        time.sleep(3)
        k += 1
