#!/usr/bin/python
'''
Author: linxuan
Date: 2022-07-09 17:11:54
Description: 使用selenium抓取opensea的数据
FilePath: /stealer/src/opensea.py
'''
from concurrent.futures import ThreadPoolExecutor
import json
import time
import datetime
from typing import List
from selenium.common import exceptions
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement


def get_collections_url(url) -> List[str]:
    '''Collect as many collection URLs as possible from an OpenSea category page.

    @param url: URL of the OpenSea category page to scrape
    @return: de-duplicated list of collection page URLs

    OpenSea renders its grid lazily, so we scroll step by step and harvest the
    anchors visible after each scroll until the page height stops changing.
    '''
    driver = Chrome()
    try:
        driver.get(url)
        time.sleep(5)  # wait for the initial render
        collections_url = set()
        last_height = 0
        while True:
            elements = driver.find_elements(
                by=By.XPATH, value='/html/body/div[1]/div/main/div/div[2]/div/div/div/div/a')
            for ele in elements:
                try:
                    collections_url.add(ele.get_attribute('href'))
                except exceptions.StaleElementReferenceException:
                    # The grid re-rendered under us; wait, then retry once.
                    time.sleep(3)
                    try:
                        collections_url.add(ele.get_attribute('href'))
                    except exceptions.StaleElementReferenceException:
                        continue  # element is gone for good; skip it
            driver.execute_script('window.scrollBy(0, 1000)')
            time.sleep(3)
            current_height = driver.execute_script(
                "return document.documentElement.scrollTop || window.pageYOffset || document.body.scrollTop;")
            if current_height == last_height:  # reached the bottom
                break
            last_height = current_height
        return list(collections_url)
    finally:
        # quit() shuts down the browser AND the chromedriver process;
        # close() only closes the window and leaks the session.
        driver.quit()


def get_collection_info(collection_url, driver: Chrome, max_arts_number=-1) -> dict:
    '''Fetch a collection's metadata and the URLs of its art pieces.

    @param collection_url: URL of the collection page
    @param driver: an already-started selenium Chrome driver (owned by caller)
    @param max_arts_number: cap on the number of NFT URLs to gather; -1 = no limit
    @return: {'collection_name': str, 'author': str, 'items': str, 'arts_url': [str]}
    '''
    driver.get(collection_url)
    time.sleep(5)  # wait for the page to render
    collection_name = driver.find_element(
        by=By.XPATH, value='/html/body/div[1]/div/main/div/div/div[3]/div/div/div[1]/div/div[2]/h1').text
    items = driver.find_element(
        by=By.XPATH, value='/html/body/div[1]/div/main/div/div/div[5]/div/div[1]/div/div[3]/div/div[2]/button/div/span[1]/div').text
    # NOTE(review): this XPath is identical to the collection-name one, so
    # 'author' currently duplicates the name -- confirm the intended XPath.
    author = driver.find_element(
        by=By.XPATH, value='/html/body/div[1]/div/main/div/div/div[3]/div/div/div[1]/div/div[2]/h1').text
    arts_url = set()
    last_height = 0
    while True:
        elements = driver.find_elements(
            by=By.XPATH, value='/html/body/div[1]/div/main/div/div/div[5]/div/div[3]/div[3]/div[3]/div[3]/div[2]/div/div/div/div/article/a')
        for ele in elements:
            try:
                arts_url.add(ele.get_attribute('href'))
            except exceptions.StaleElementReferenceException:
                # Grid re-rendered; wait, then retry this element once
                # (the original slept but never retried, dropping the href).
                time.sleep(2)
                try:
                    arts_url.add(ele.get_attribute('href'))
                except (exceptions.StaleElementReferenceException,
                        exceptions.NoSuchElementException):
                    continue
            except exceptions.NoSuchElementException:
                print("NoSuchElementException")
        # Stop early once we have collected enough URLs.
        if max_arts_number != -1 and len(arts_url) >= max_arts_number:
            break
        driver.execute_script('window.scrollBy(0, 1000)')
        time.sleep(2)
        current_height = driver.execute_script(
            "return document.documentElement.scrollTop || window.pageYOffset || document.body.scrollTop;")
        if current_height == last_height:  # reached the bottom
            break
        last_height = current_height
    arts_url = list(arts_url)
    if max_arts_number != -1 and len(arts_url) > max_arts_number:
        arts_url = arts_url[:max_arts_number]
    return {'collection_name': collection_name, "author": author, 'items': items, 'arts_url': arts_url}


def get_art_info(art_url, web) -> dict:
    '''Scrape a single NFT ("art") page for its metadata.

    @param art_url: URL of the NFT page
    @param web: an already-started selenium Chrome driver
    @return: dict with name, price, content_address, contract_address,
             token_id, token_std, blockchain, origin_link plus the optional
             *_src link variants (empty string when absent)
    '''
    web.get(art_url)

    def get(xpath: str) -> WebElement:
        # Shorthand; raises NoSuchElementException when the node is missing.
        return web.find_element(by=By.XPATH, value=xpath)

    name = get('/html/body/div[1]/div/main/div/div/div/div[1]/div/div[1]/div[2]/section[1]/h1').text
    try:
        price = get(
            '/html/body/div[1]/div/main/div/div/div/div[1]/div/div[1]/div[2]/div[1]/div/section/div[2]/div[2]/div[1]/div[2]').text
    except exceptions.NoSuchElementException:  # item may not be purchasable right now
        price = "-1"
    try:
        content_address = get(
            '/html/body/div[1]/div/main/div/div/div/div[1]/div/div[1]/div[1]/article/div/div/div/div/img').get_attribute('src')
    except exceptions.NoSuchElementException:
        content_address = ""
    # Expand the "Details" accordion so its fields become queryable.
    get('//*[@id="Header assets-item-asset-details"]').click()
    # XXX contract address and token id exist both as preview text and as a
    # link; store both representations.
    contract_address = get('//*[@id="Body assets-item-asset-details"]/div/div/div/div[1]/span').text
    token_id = get('//*[@id="Body assets-item-asset-details"]/div/div/div/div[2]/span').text
    token_std = get('//*[@id="Body assets-item-asset-details"]/div/div/div/div[3]/span').text
    blockchain = get('//*[@id="Body assets-item-asset-details"]/div/div/div/div[4]/span').text
    contract_address_src = ''
    if '.' in contract_address:  # truncated preview (e.g. "0x12...ab") may carry a link
        try:
            contract_address_src = get(
                '//*[@id="Body assets-item-asset-details"]/div/div/div/div[1]/span/a').get_attribute('href')
        except exceptions.NoSuchElementException:
            pass
    token_id_src = ''
    if '.' in token_id:  # truncated preview may carry a link
        try:
            token_id_src = get(
                '//*[@id="Body assets-item-asset-details"]/div/div/div/div[2]/span/a').get_attribute('href')
        except exceptions.NoSuchElementException:
            pass
    origin_link = art_url  # keep the source URL for traceability
    return {'name': name, 'price': price, 'content_address': content_address, 'contract_address': contract_address,
            'token_id': token_id, 'token_std': token_std, 'blockchain': blockchain, 'origin_link': origin_link,
            'token_id_src': token_id_src, 'contract_address_src': contract_address_src}


def process_collection(collection_url, result_group: list, max_arts_number=-1):
    '''Scrape one collection and append each art's info dict to result_group.

    @param collection_url: URL of the collection page
    @param result_group: shared output list; each successfully scraped art dict
                         (merged with the collection meta info) is appended
    @param max_arts_number: per-collection cap on scraped NFTs; -1 = no limit

    On collection-level failure the counter attribute
    process_collection.faild_cnt (initialized by the caller) is incremented.
    '''
    # Start maximized so lazily-rendered elements are actually in view.
    opt = Options()
    opt.add_argument('--start-maximized')
    web = Chrome(options=opt)
    try:
        collection_info = get_collection_info(collection_url, web, max_arts_number)
        meta_info = {
            "collection_name": collection_info['collection_name'],
            "author": collection_info['author'],
            "items": collection_info['items'],
        }
        for url in collection_info['arts_url']:
            try:
                art_info = get_art_info(url, web)
                art_info.update(meta_info)
                result_group.append(art_info)
            except Exception:
                # Deliberate best-effort per art: mark the failure and move on.
                print('| ', end='')
            finally:
                time.sleep(3)  # throttle to avoid anti-scraping bans

        print("success over with result items: ", len(result_group))
    except Exception as e:
        print("faild with collection: ", collection_url)
        # Print the exception itself: e.args[0] raises IndexError when the
        # exception was constructed without arguments.
        print('error: ', e)
        process_collection.faild_cnt += 1
    finally:
        # quit() tears down the browser AND the chromedriver process;
        # close() only closes the window.
        web.quit()


def save_result(result_group: List[dict], file_name):
    '''Write the scraped records to a CSV file and clear the list.

    @param result_group: list of flat dicts sharing the same keys; emptied
                         after a successful write so the caller can reuse it
    @param file_name: target CSV path

    If the CSV write fails, fall back to dumping one JSON object per line to
    ``file_name + '.json'`` (best effort).
    '''
    if not result_group:
        # Nothing to save. The original fell through here and crashed on
        # result_group[0], then the fallback opened the .json file in read
        # mode and failed as well.
        return
    try:
        with open(file_name, 'w') as f:
            f.write(','.join(result_group[0].keys()) + '\n')
            f.writelines([','.join([str(_) for _ in item.values()]) + '\n' for item in result_group])
        result_group.clear()
    except Exception:
        # Best-effort fallback; must open with 'w' (the original omitted the
        # mode, so the fallback could never write).
        with open(file_name + '.json', 'w') as f:
            f.writelines([json.dumps(item) + '\n' for item in result_group])
        result_group.clear()
    print("save result to file: ", file_name)


def main():
    '''Entry point: scrape a fixed snapshot of photography collections and
    periodically flush the accumulated results to CSV files under resources/.
    '''
    photography_url = "https://opensea.io/explore-collections?tab=photography-category"
    # The list below is a pre-captured snapshot of get_collections_url(photography_url),
    # inlined to avoid re-crawling the category page on every run.
    # collections_url = get_collections_url(photography_url)
    collections_url = [  # scraped results
        'https://opensea.io/collection/spaceemotions', 'https://opensea.io/collection/communion', 'https://opensea.io/collection/anxious-pleasures', 'https://opensea.io/collection/phto-all-access-pass', 'https://opensea.io/collection/glitchgods', 'https://opensea.io/collection/drip-drop-by-dave-krugman', 'https://opensea.io/collection/privileged-mediocrity', 'https://opensea.io/collection/timepieces-x-foto', 'https://opensea.io/collection/toronto-icons', 'https://opensea.io/collection/water-and-flow', 'https://opensea.io/collection/street-queens', 'https://opensea.io/collection/flyingcolors', 'https://opensea.io/collection/spannungsbogen', 'https://opensea.io/collection/photoswondersbymohamedboukabouz', 'https://opensea.io/collection/polaris-3', 'https://opensea.io/collection/the-metascapes', 'https://opensea.io/collection/press-play-1', 'https://opensea.io/collection/rumors-of-arctic-belonging', 'https://opensea.io/collection/street-art-cities', 'https://opensea.io/collection/wind-in-her-hair-', 'https://opensea.io/collection/miscommunicated-secrets', 'https://opensea.io/collection/in-ascent', 'https://opensea.io/collection/back-to-light-1', 'https://opensea.io/collection/desert-oddities', 'https://opensea.io/collection/growing-up-travelling', 'https://opensea.io/collection/homegrown-2', 'https://opensea.io/collection/dontgooutside', 'https://opensea.io/collection/icons-8', 'https://opensea.io/collection/editionsxtylersjourney', 'https://opensea.io/collection/the-purpose-of-light', 'https://opensea.io/collection/where-my-vans-go', 'https://opensea.io/collection/liminal-space-2', 'https://opensea.io/collection/dog-days-bogota-by-alec-soth', 'https://opensea.io/collection/afternoon-dolls', 'https://opensea.io/collection/firstdayout', 'https://opensea.io/collection/there-is-life-in-the-streets', 'https://opensea.io/collection/dreamstates', 'https://opensea.io/collection/arro', 'https://opensea.io/collection/bikes-of-burden', 
'https://opensea.io/collection/gabs-beach-club', 'https://opensea.io/collection/i-slowly-watched-him-disappear', 'https://opensea.io/collection/reubenwu-limited-editions', 'https://opensea.io/collection/discs-2', 'https://opensea.io/collection/unidentified-contract-yhlywkmkit', 'https://opensea.io/collection/county-fair', 'https://opensea.io/collection/guadalupe-1',
        'https://opensea.io/collection/quantumunlocked', 'https://opensea.io/collection/0-rivers', 'https://opensea.io/collection/shooting-for-the-gold', 'https://opensea.io/collection/slabs-1', 'https://opensea.io/collection/no-feeling-is-final', 'https://opensea.io/collection/by-proxy', 'https://opensea.io/collection/yaasmyn-fula-the-untold-story-of-tupac', 'https://opensea.io/collection/dear-julia', 'https://opensea.io/collection/touching-strangers', 'https://opensea.io/collection/women-unite-10k-assemble', 'https://opensea.io/collection/14th-century-series', 'https://opensea.io/collection/super-user', 'https://opensea.io/collection/dunes-painting', 'https://opensea.io/collection/memory-emotion', 'https://opensea.io/collection/cyanotypes', 'https://opensea.io/collection/blackandwhitedays', 'https://opensea.io/collection/rizzo-collection', 'https://opensea.io/collection/ilhndrop', 'https://opensea.io/collection/afromythology', 'https://opensea.io/collection/math-art-1980-1995', 'https://opensea.io/collection/newyorkers', 'https://opensea.io/collection/segmented-portraits', 'https://opensea.io/collection/blackandwhitenights', 'https://opensea.io/collection/bushes-and-succulents', 'https://opensea.io/collection/people-of-the-sun', 'https://opensea.io/collection/musewhobelieves', 'https://opensea.io/collection/joey-the-photographer', 'https://opensea.io/collection/mh-tl-t-moon-faced', 'https://opensea.io/collection/unseen-emanuele-ferrari', 'https://opensea.io/collection/her-wave', 'https://opensea.io/collection/nue-york', 'https://opensea.io/collection/enough-2', 'https://opensea.io/collection/chromatic-world', 'https://opensea.io/collection/editionsxguido', 'https://opensea.io/collection/chronicles-360', 'https://opensea.io/collection/country-doctor-1', 'https://opensea.io/collection/tokyo-nude', 'https://opensea.io/collection/cyberpunknyc', 'https://opensea.io/collection/outofeden', 'https://opensea.io/collection/tmrw-tomorrow', 
'https://opensea.io/collection/betteroffanon', 'https://opensea.io/collection/syzygy-v', 'https://opensea.io/collection/the-lonely-astronaut', 'https://opensea.io/collection/collective-strangers', 'https://opensea.io/collection/keepers-of-the-inn', 'https://opensea.io/collection/stranger-fruit', 'https://opensea.io/collection/luxnoctis', 'https://opensea.io/collection/ycnii']

    result = []
    # NOTE(review): cnt starts at 90 and the loop slices [90:] -- presumably a
    # manual resume point from a previous interrupted run; confirm before reuse.
    cnt, last_items = 90, 0
    process_collection.faild_cnt = 0  # failure counter incremented by process_collection
    print(datetime.datetime.now())
    start_time = datetime.datetime.now()
    # NOTE(review): looks like a leftover smoke-test write to resources/test.csv
    # (the [1:] strips the leading '/' to make the path relative) -- confirm.
    save_result([{'a': "a", "b": "b"}], '/resources/test.csv'[1:])
    for url in collections_url[90:]:
        cnt += 1
        process_collection(url, result, max_arts_number=20)
        if cnt % 5 == 0:
            # Checkpoint every 5 collections so a crash loses little work.
            save_result(result, f'/resources/result_to_{cnt}.csv'[1:])
            print(datetime.datetime.now(), "executed: ", (datetime.datetime.now() - start_time).seconds/60, "minutes")
        if len(result) == last_items:
            time.sleep(120)  # no new items -> probably rate-limited; back off
        last_items = len(result)
    # Flush whatever remains after the loop.
    save_result(result, f'/resources/result_to_{cnt}.csv'[1:])
    pass


# 执行main
if __name__ == "__main__":
    main()
