Web scraper using Python: code examples
Example 1: Python web scraping (fetch and parse a page with requests and BeautifulSoup)
import requests
from bs4 import BeautifulSoup

# Fetch the search results page and parse the HTML
URL = 'https://www.monster.com/jobs/search/?q=Software-Developer&where=Australia'
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
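Example 1 stops after parsing the page. A minimal continuation might look like the sketch below; the 'ResultsContainer' id and the 'card-content' / 'title' selectors are assumptions about Monster's markup and may not match the live site.

# Hypothetical selectors; Monster's actual markup may differ
results = soup.find(id='ResultsContainer')
if results is not None:
    for card in results.find_all('section', class_='card-content'):
        title_elem = card.find('h2', class_='title')
        if title_elem is not None:
            print(title_elem.get_text(strip=True))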
Example 2: Python web scraping (print a page's title)
import os

import requests
from bs4 import BeautifulSoup

url = "https://www.google.com/"
response = requests.get(url)

if response.ok:
    # "lxml" requires the lxml package; the built-in "html.parser" also works
    soup = BeautifulSoup(response.text, "lxml")
    title = soup.find("title").text
    print("The title is : " + title)

os.system("pause")  # Windows-only; keeps the console window open
Example 3: web scraping with Scrapy (spider with pagination)
import scrapy

from ..items import SampletestItem


class QuoteTestSpider(scrapy.Spider):
    name = 'quote_test'
    start_urls = ['https://quotes.toscrape.com/']

    def parse(self, response):
        # Extract each quote block on the page into its own item
        quotes = response.css("div.quote")
        for quote in quotes:
            items = SampletestItem()
            items['title'] = quote.css("span.text::text").get()
            items['author'] = quote.css(".author::text").get()
            items['tags'] = quote.css(".tags .tag::text").getall()
            yield items

        # Follow the pagination link, if there is one
        next_page = response.css(".next a::attr(href)").get()
        if next_page is not None:
            next_url = response.urljoin(next_page)
            yield scrapy.Request(next_url, callback=self.parse)
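The spider imports SampletestItem from the project's items.py, which is not shown in the original snippet. A minimal definition consistent with the fields used above would be:

# items.py (assumed layout; not part of the original snippet)
import scrapy


class SampletestItem(scrapy.Item):
    title = scrapy.Field()
    author = scrapy.Field()
    tags = scrapy.Field()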
Example 4: web scraper (scrape a list of names; uses simple_get from Example 5)
from bs4 import BeautifulSoup


def get_names():
    """
    Downloads the page where the list of mathematicians is found
    and returns a list of strings, one per mathematician
    """
    url = 'http://www.fabpedigree.com/james/mathmen.htm'
    response = simple_get(url)  # simple_get is defined in Example 5

    if response is not None:
        html = BeautifulSoup(response, 'html.parser')
        names = set()
        for li in html.select('li'):
            for name in li.text.split('\n'):
                if len(name) > 0:
                    names.add(name.strip())
        return list(names)

    # Raise an exception if we failed to get any data from the url
    raise Exception('Error retrieving contents at {}'.format(url))
Example 5: web scraper (helper functions for safe HTTP GET requests)
from contextlib import closing

from requests import get
from requests.exceptions import RequestException


def simple_get(url):
    """
    Attempts to get the content at `url` by making an HTTP GET request.
    If the content-type of response is some kind of HTML/XML, return the
    text content, otherwise return None.
    """
    try:
        with closing(get(url, stream=True)) as resp:
            if is_good_response(resp):
                return resp.content
            else:
                return None

    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None


def is_good_response(resp):
    """
    Returns True if the response seems to be HTML, False otherwise.
    """
    content_type = resp.headers.get('Content-Type')
    return (resp.status_code == 200
            and content_type is not None
            and content_type.lower().find('html') > -1)


def log_error(e):
    """
    It is always a good idea to log errors.
    This function just prints them, but you can
    make it do anything.
    """
    print(e)
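Examples 4 and 5 are meant to be used together. A short driver, assuming get_names() and the helpers above are saved in the same module, might look like this:

# Assumes get_names() (Example 4) and simple_get() (Example 5) are in scope
if __name__ == '__main__':
    print('Getting the list of names....')
    for name in get_names():
        print(name)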