Web scraping with Python — tutorial code examples

Example 1: fetch a page with requests and parse it with BeautifulSoup

import requests
from bs4 import BeautifulSoup

# Example 1: download a Monster job-search results page and build a
# BeautifulSoup parse tree from it.

# Target search: Software Developer roles in Australia.
URL = 'https://www.monster.com/jobs/search/?q=Software-Developer&where=Australia'

# Fetch the page, then hand the raw response bytes to the stdlib-backed
# 'html.parser' backend (no external parser required).
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')

Example 2: print a page's &lt;title&gt; using requests and BeautifulSoup

#pip install beautifulsoup4

import os
import requests
from bs4 import BeautifulSoup

# Example 2: fetch Google's homepage and print the text of its <title> tag.
url = "https://www.google.com/"
response = requests.get(url)  # fixed typo: variable was spelled "reponse"

if response.ok:
    soup = BeautifulSoup(response.text, "lxml")
    title_tag = soup.find("title")
    # .get_text() is more robust than str()+.replace("<title>", ...):
    # it also works when the tag carries attributes, and the None guard
    # avoids printing the literal string "None" when no <title> exists.
    title = title_tag.get_text() if title_tag is not None else ""
    print("The title is : " + str(title))

# NOTE(review): "pause" is a Windows-only shell command; on other
# platforms this prints an error and returns immediately.
os.system("pause")

#python (code name).py

#python (code name).py

Example 3: download a set of linked data files from a page

# basic web scraping with python
# Import libraries
import requests
import urllib.request
import time
from bs4 import BeautifulSoup

# Set the URL you want to webscrape from
url = 'http://web.mta.info/developers/turnstile.html'

# Connect to the URL
response = requests.get(url)

# Parse HTML and save to BeautifulSoup object
soup = BeautifulSoup(response.text, "html.parser")

# To download the whole data set, loop through every <a> tag on the page.
base_url = 'http://web.mta.info/developers/'
line_count = 1  # variable to track what line you are on
for one_a_tag in soup.findAll('a'):  # 'a' tags are for links
    if line_count >= 36:  # links to the data files start at line 36
        # .get() avoids a KeyError on <a> tags that have no href attribute.
        link = one_a_tag.get('href')
        if link:
            download_url = base_url + link
            # Save under the bare turnstile_* filename in the current
            # directory. find() returns -1 when '/turnstile_' is absent;
            # -1 + 1 == 0, so such links fall back to the full relative
            # path as the local filename.
            filename = './' + link[link.find('/turnstile_') + 1:]
            urllib.request.urlretrieve(download_url, filename)
            time.sleep(1)  # be polite: pause a second between downloads
    # add 1 for next line
    line_count += 1

Example 4: a Scrapy spider that scrapes quotes and follows pagination

import scrapy
from ..items import SampletestItem  # items class


class QuoteTestSpider(scrapy.Spider):
    """Scrape quote text, author, and tags from quotes.toscrape.com,
    following the "Next" link until the last page."""

    name = 'quote_test'
    start_urls = ['https://quotes.toscrape.com/']

    def parse(self, response):
        """Yield one item per quote on the page, then (at most) one
        request for the next page."""
        quotes = response.css("div.quote")
        for quote in quotes:
            # Create a fresh item for every quote. The original reused a
            # single mutable item across iterations, which risks yielding
            # stale/overwritten data if items are buffered downstream.
            items = SampletestItem()
            items['title'] = quote.css("span.text::text").get()
            items['author'] = quote.css(".author::text").get()
            items['tags'] = quote.css(".tags .tag::text").getall()
            yield items

        # Handle pagination once per page, not once per quote: the
        # original yielded the same next-page request inside the loop,
        # producing redundant requests that only Scrapy's duplicate
        # filter suppressed.
        next_page = response.css(".next a::attr(href)").get()
        if next_page is not None:
            next_url = response.urljoin(next_page)
            yield scrapy.Request(next_url, callback=self.parse)

Tags:

HTML example