get all links from a website python beautifulsoup code example

Example 1: BeautifulSoup - scraping a single link from a website

import requests
from bs4 import BeautifulSoup

page = requests.get('http://www.example.com')
soup = BeautifulSoup(page.content, 'html.parser')

print(soup.select_one('p a').attrs['href'])  # print the href of the first <a> inside a <p>
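
Example 1 only prints the first matching link. A minimal sketch for collecting every href on the page, assuming the same requests/BeautifulSoup setup (find_all('a', href=True) skips anchors without an href attribute):

import requests
from bs4 import BeautifulSoup

page = requests.get('http://www.example.com')
soup = BeautifulSoup(page.content, 'html.parser')

# collect the href of every <a> tag on the page
all_links = [a['href'] for a in soup.find_all('a', href=True)]
print(all_links)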

Example 2: get all href links from a website with BeautifulSoup (Python)

from bs4 import BeautifulSoup
from urllib.request import urlopen
import re

def getLinks(url):
    html_page = urlopen(url)
    soup = BeautifulSoup(html_page, 'html.parser')
    links = []
    # keep only absolute links whose href starts with http://
    for link in soup.find_all('a', attrs={'href': re.compile("^http://")}):
        links.append(link.get('href'))
    return links

print(getLinks("https://arstechnica.com"))

Example 3: get all href links from a website with BeautifulSoup (Python)

from bs4 import BeautifulSoup
from urllib.request import urlopen
import re

html_page = urlopen("https://arstechnica.com")
soup = BeautifulSoup(html_page, 'html.parser')
links = []
# keep only absolute links whose href starts with http://
for link in soup.find_all('a', attrs={'href': re.compile("^http://")}):
    links.append(link.get('href'))
print(links)
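
The regex in Examples 2 and 3 keeps only absolute links that start with http://, so https:// and relative hrefs are dropped. A minimal sketch, assuming the same target URL, that keeps every href and resolves relative links with urllib.parse.urljoin:

from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.parse import urljoin

url = "https://arstechnica.com"
soup = BeautifulSoup(urlopen(url), 'html.parser')

# resolve relative hrefs (e.g. "/science/") against the page URL
links = [urljoin(url, a['href']) for a in soup.find_all('a', href=True)]
print(links)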