2020-06-14 21:29:42 +02:00
|
|
|
from re import findall
|
|
|
|
from typing import List
|
|
|
|
|
|
|
|
from bs4 import BeautifulSoup
|
2020-05-21 18:45:51 +02:00
|
|
|
from requests import get
|
2020-06-12 19:21:50 +02:00
|
|
|
|
2020-06-14 21:29:42 +02:00
|
|
|
from constants import FLICKR_URL, URL
|
2020-05-21 18:45:51 +02:00
|
|
|
|
|
|
|
|
2020-06-05 13:48:47 +02:00
|
|
|
def format_url(dataset) -> str:
    """
    Builds the opendata API URL for the given dataset.

    The dataset identifier is substituted into the URL template
    from the constants module.
    """
    return URL.format(dataset)
|
|
|
|
|
|
|
|
|
|
|
|
def request_dataset(dataset, timeout=10):
    """
    Fetches the requested dataset from opendata's API.

    :param dataset: identifier of the dataset to fetch
    :param timeout: seconds to wait for the server before giving up
        (requests hangs indefinitely without one)
    :returns: the decoded JSON payload of the response
    :raises requests.HTTPError: if the server replies with an error status
    :raises requests.Timeout: if the server does not answer in time
    """
    url = format_url(dataset)
    # A missing timeout makes requests wait forever on a stalled server;
    # the default keeps existing callers working.
    response = get(url, timeout=timeout)
    response.raise_for_status()
    return response.json()
|
2020-06-14 21:29:42 +02:00
|
|
|
|
|
|
|
|
|
|
|
def request_flickr(keywords, timeout=10) -> str:
    """
    Returns the HTML of a Flickr search.

    :param keywords: search terms substituted into the Flickr search URL
    :param timeout: seconds to wait for Flickr before giving up
        (requests hangs indefinitely without one)
    :returns: the raw HTML body of the search results page
    :raises requests.Timeout: if Flickr does not answer in time
    """
    search_url = FLICKR_URL.format(keywords)
    # Bound the request so a stalled connection cannot block forever;
    # the default keeps existing callers working.
    result = get(search_url, timeout=timeout)
    return result.text
|
|
|
|
|
|
|
|
|
|
|
|
def scrap_flickr(keywords) -> List[str]:
    """
    Creates a list of image links from a Flickr search.

    :param keywords: search terms forwarded to request_flickr
    :returns: the staticflickr.com JPEG links found in the result page
    """
    html = request_flickr(keywords)
    soup = BeautifulSoup(html, features="html.parser")
    images = soup.find_all(
        "div", class_="view photo-list-photo-view requiredToShowOnServer awake",
    )
    # Raw string with escaped dots: the original pattern let '.' match any
    # character (and '\S' in a non-raw string is a deprecated escape).
    image_links = findall(r"(live\.staticflickr\.com/\S+\.jpg)", str(images))
    return image_links
|