import requests
from   bs4 import BeautifulSoup
from   tqdm import tqdm
import time
import pandas as pd
import csv

# Echo every expression result in an interactive shell, not just the last one
# (this script was written for an IPython/pythonista-style environment).
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all" 

#https://www.coincola.com/ad-detail/100000-600000
# Example advertisement URL; not referenced anywhere below, kept for reference.
trade_urls = 'https://localbitcoins.com/ad/743675/purchase-bitcoin-bank-transfer-china-china'


# The site blocks obvious crawlers, so send a browser-like User-Agent header.
headers = {
    'User-Agent': 'Mozilla/5.0'
}

# Module-level accumulators, one list per scraped column. get_ad_info()
# appends one value to each list per advertisement, and save_csv() snapshots
# all of them to disk after every ad.
price_data = []
paymnet_data = []  # NOTE(review): typo for "payment"; kept, other functions reference this name
username_data = []
trade_data = []
location_data = []
paymentwindows_data = []
advertisement_data = []

def save_csv():
    """Snapshot the module-level column lists into a dated CSV file.

    Builds a DataFrame from the seven accumulator lists (price, payment
    method, username, trade limits, location, payment window, ad text)
    and writes it to ``localbitcoinYYYYMMDD.csv`` in the working directory.
    """
    columns = {
        '报价': price_data,
        '支付方式': paymnet_data,
        '名字': username_data,
        '限制': trade_data,
        '国家': location_data,
        '时间': paymentwindows_data,
        '留言': advertisement_data,
    }
    frame = pd.DataFrame(columns)
    stamp = time.strftime('%Y%m%d', time.localtime(time.time()))
    # utf_8_sig writes a BOM so Excel opens the Chinese headers correctly.
    frame.to_csv('localbitcoin%s.csv' % (stamp), encoding="utf_8_sig")


def get_ad_info(url):
    """Scrape one advertisement page and record its fields.

    Fetches *url*, extracts the text of the first seven ``div.col-md-8``
    elements (price, payment method, username, trade limits, location,
    payment window, advertisement text), prints them, appends each to its
    module-level accumulator list, and re-saves the CSV snapshot.

    Network errors are printed and the page is skipped; parse errors are
    ignored (best-effort scraping). Returns None.
    """
    try:
        r = requests.get(url, headers=headers)
        r.raise_for_status()  # raise on non-2xx status codes
    except requests.RequestException as e:
        print(e)
        return

    soup = BeautifulSoup(r.text, 'html5lib')  # html5lib: parser available on pythonista

    try:
        # One slot per column; pages with fewer divs leave the rest empty,
        # keeping the accumulator lists equal in length for the DataFrame.
        fields = [''] * 7
        for i, div in enumerate(soup.find_all('div', class_='col-md-8')[:7]):
            text = div.get_text().replace('\n', '').replace('\t', '').replace('\r', '').strip()
            # The first six columns also have internal spaces removed; the
            # advertisement text (index 6) keeps them so the message stays readable.
            if i < 6:
                text = text.replace(' ', '')
            fields[i] = text

        v1, v2, v3, v4, v5, v6, v7 = fields
        print(v1, v2, v3, v4, v5, v6, v7)

        price_data.append(v1)
        paymnet_data.append(v2)
        username_data.append(v3)
        trade_data.append(v4)
        location_data.append(v5)
        paymentwindows_data.append(v6)
        advertisement_data.append(v7)

        # Persist after every ad so a crash mid-crawl loses nothing.
        save_csv()
    except (AttributeError, TypeError):
        # Best-effort: malformed pages are skipped without aborting the crawl.
        pass

    return

   
def get_next_page(url):
    """Scrape one listing page: visit every ad on it, then report the next page.

    Returns:
        int: the page number parsed from the "Next Page" link's href,
        1000 when that link is present but disabled (``href == '#'``,
        i.e. the last page was reached), or None when the request failed
        or no "Next Page" link exists — callers treat None as "stop".
    """
    try:
        r = requests.get(url, headers=headers)
        r.raise_for_status()  # raise on non-2xx status codes
    except requests.RequestException as e:
        print(e)
        return None

    soup = BeautifulSoup(r.text, 'html5lib')  # html5lib: parser available on pythonista

    # Visit each advertisement linked from the big buttons on this page.
    try:
        for button in soup.find_all('a', class_='btn btn-default megabutton'):
            butlink = button.get('href')
            # NOTE(review): if the href already starts with '/', this yields a
            # double slash after the host; most servers tolerate it — verify.
            openlink = "https://localbitcoins.com/%s" % (butlink)
            print(openlink)
            get_ad_info(openlink)
            time.sleep(5)  # throttle so we do not hammer the site
    except (AttributeError, TypeError):
        pass

    # Locate the pagination link and extract the next page number.
    try:
        for item in soup.find_all('a'):
            nexttitle = item.get('title')
            nextpage = item.attrs['href']
            print("Found the URL:", item.get('title'), item.attrs['href'])  # e.g. Next Page ?page=2
            if nexttitle == 'Next Page':
                if nextpage == '#':
                    return 1000  # disabled link: sentinel meaning "last page"
                print(nextpage, nextpage[-1])
                # BUG FIX: the original returned int(nextpage[-1]) — only the
                # LAST digit — so '?page=12' became 2 and '?page=10' became 0,
                # breaking pagination past page 9. Parse the full number.
                return int(nextpage.rsplit('page=', 1)[-1])
    except (AttributeError, TypeError):
        pass

# -- Driver: read currency codes from current.csv, crawl every buy/sell page --
filepath = 'current.csv'

# Read all rows up front inside a context manager; the original passed an
# open() straight to csv.reader and never closed the file handle.
with open(filepath, encoding='utf-8') as csv_file:
    rows = list(csv.reader(csv_file))

for row in rows:
    if not row:
        continue  # tolerate blank lines in the CSV
    currency = row[0]

    # Crawl the "buy" listings until get_next_page signals the last page
    # (sentinel 1000) or gives up entirely (None).
    buypage = 1
    while buypage != 1000:
        buyurl = "https://localbitcoins.com/buy-bitcoins-online/%s/?page=%d" % (currency, buypage)
        print(buyurl)
        buypage = get_next_page(buyurl)
        print('buypage=', buypage)
        if buypage is None:
            break

    # Same pagination loop for the "sell" listings.
    sellpage = 1
    while sellpage != 1000:
        sellurl = "https://localbitcoins.com/sell-bitcoins-online/%s/?page=%d" % (currency, sellpage)
        print(sellurl)
        sellpage = get_next_page(sellurl)
        print('sellpage=', sellpage)
        if sellpage is None:
            break
    
   
		


