import requests
from slugify import slugify
from os.path import exists
from os import listdir,mkdir
from time import sleep
from random import uniform
from re import sub
import pickle

# Root-logger configuration for this script: send everything (DEBUG and up)
# to stderr so the module-level logging.debug/info/error calls below are
# visible while the scraper runs.
import logging, sys
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
logging.debug('Running scrapper…')


# from date import date


def scrap(urls, slug="scrapped", headers = headers, cookies=cookies, sleeptime=15):

    path = "data/"+slug+".bin"
    picklade = pickle.open(path)
    for url in urls:

        # if not exists(path):
            logging.info('Getting page ',url,', for ', slug,'...')
            # try:


            webpage = requests.get(url, headers=headers,cookies=cookies)
            if webpage.status_code == 500:
                logging.error("Error 500, waiting 2mins…")
                sleep(120)
            else:
                webpage.raise_for_status()

            picklade.add(webpage)

            print('Page ', url,' added to ', path)
            
            print('Sleeping ', r, ' seconds…')
            r = uniform(sleeptime/2,1.5*sleeptime)
            sleep(r)
            # print('\n')
    return paths

# urls= open('urls_nontwitter.txt',r)
# urls=urls.read()



# browser scraping
# https://www.facebook.com/groups/<GROUP_NAME>/photos/.
# Selenium: driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# execute_script("return document.documentElement.outerHTML")
# find_elements_by_xpath("//*")
# todo:
# use pickle

import requests
import re, os
from scrapper import scrap

GROUP_ID = 1495297207159833


# def build_pages_urls(root="https://m.facebook.com/media/set/?set=g.%i&refid=56&s=" % GROUP_ID):
#     s = 12
#     pages_urls = set()
#     while(s < 10000): # todo detect end
#         pages_urls.add(root + str(s))
#         s = s+12
#     return pages_urls


# pages_urls = build_pages_urls()
# print(pages_urls)


# NOT FINISHED!
# album_paths=scrap(pages_urls)

def parse_photos_urls(folder='scrapped'):
    """Extract Facebook photo ids from previously saved HTML pages.

    Scans every file in *folder* for ``fbid=<digits>`` tokens and builds,
    for each distinct token, the mobile-site URLs of the full-size image
    and of the photo's HTML page.

    Example links the token appears in:
    https://m.facebook.com/photo.php?fbid=...&id=...&set=gm....&source=43&refid=56
    https://m.facebook.com/photo/view_full_size/?fbid=...&ref_component=...

    Parameters
    ----------
    folder : str
        Directory containing the scraped HTML files.

    Returns
    -------
    list of dict
        One entry per distinct fbid, with keys ``fbid``, ``html`` and
        ``img_url``.
    """
    # Compile once, outside the per-file loop (same pattern for every file).
    regex = re.compile(r"fbid=[0-9]+")

    photos_paths = []
    seen = set()  # dicts are unhashable, so dedupe on the fbid token itself
    for name in os.listdir(folder):
        # with-block guarantees the handle is closed (original leaked it)
        with open(os.path.join(folder, name)) as f:
            html = f.read()

        fbids = regex.findall(html)
        logging.debug(fbids)

        for fbid in fbids:
            if fbid in seen:
                continue
            seen.add(fbid)

            img_url = "https://m.facebook.com/photo/view_full_size/?" + fbid
            logging.debug(img_url)
            html_url = "https://m.facebook.com/photo.php?" + fbid

            photos_paths.append({"fbid": fbid, "html": html_url, "img_url": img_url})

        logging.debug(photos_paths)
    return photos_paths

# os.mkdir('ndt')

# photos = open('ndt/','w+')
# photos_urls_f = open('ndt/photos_urls','w+')
# photos_urls=parse_photos_urls('scrapped')
# for url in photos_urls:
    # photos_urls_f.write(url)



# scrap(photos_urls, folder="ndt")


#----

# cat /Users/hadrien/Desktop/neurchi\ de\ templates.html | grep -o 'https://www.facebook.com/photo.php?fbid=[0-9]*' | sort | uniq > urls.txt
# urls=$(cat urls.txt)
# for url in $urls
# do
#     #  curl $url
#     #  curl $url -H 'user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36' -H 'accept: text/html,application/xhtml+xml,application/xml' -H $COOKIES | tee photos/${url: -24}.html
#     #  curl $url -H 'accept-encoding: gzip,deflate,sdch' -H 'accept-language: en-US,zh-CN;q=0.8' -H 'user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36' -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' -H 'cache-control: max-age=0' -H $COOKIES --compressed > photos/${url: -24}.html
#     curl $url -H 'user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36' -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' -H 'authority: www.facebook.com' -H 'cookie: a11y=%7B%22sr%22%3A0%2C%22sr-ts%22%3A1468910907621%2C%22jk%22%3A4%2C%22jk-ts%22%3A1484658107561%2C%22kb%22%3A0%2C%22kb-ts%22%3A1468910907621%2C%22hcm%22%3A0%2C%22hcm-ts%22%3A1468910907621%7D; m_pixel_ratio=2; x-referer=eyJyIjoiL1Nvc2NobGFnLyIsImgiOiIvU29zY2hsYWcvIiwicyI6Im0ifQ%3D%3D; datr=7s0zVVV2hmxBMs3m8gG3ftVF; locale=en_GB; pl=n; sb=4PEGV0DytQHyDAjBYJlsFe_3; js_ver=3269; spin=r.4641150_b.trunk_t.1544884592_s.1_v.2_; c_user=591041828; xs=5%3ApIKmQ_kn4Rn81g%3A2%3A1544648810%3A17895%3A11679; fr=1tPANCzV6Hx58jnJf.AWVa1vEvag7k4m0kW1Jf3Yn81as.Bbxx8q._w.FwT.0.0.BcFSvr.AWVBM8-S; act=1544892664518%2F8; wd=1422x402;' --compressed > photos/${url: -24}.html
#     python -c 'import bs4;a=soup.findAll("img", {"class": "spotlight"});for b in a: echo b.src;'
#     sleep 1s
#     # sleep $[ ( $RANDOM % 10 ) ]s
# done

# # <img class="spotlight" alt="Milo Stocker's photo." aria-busy="false" src="https://scontent-bru2-1.xx.fbcdn.net/v/t1.0-9/48372164_2306791606221151_6080852497683972096_n.jpg?_nc_cat=110&amp;_nc_ht=scontent-bru2-1.xx&amp;oh=0492af2668c4523f92ffa427fa0f40c0&amp;oe=5CB00E43" style="width: 960px; height: 720px;">

# grep -o '*.jpg' photos/*.html | tee urls_images.txt
# # grep -o '[^""]*.jpg' photos/*.html | sed 's/fbPhotoImage" src=//' | sed 's/"//g' | sed "s/\.html:/ /" | tee urls_images.txt
# # grep -o 'fbPhotoImage" src="https[^""]*.jpg"' photos/*.html | sed 's/fbPhotoImage" src=//' | sed 's/"//g' | sed "s/\.html:/ /" | tee urls_images.txt
# # Also grep .hasCaptions

