3 examples of 'python google search' in Python

Every line of these 'python google search' code snippets is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.

from time import sleep
from time import perf_counter as clock  # time.clock() was removed in Python 3.8; perf_counter is the closest replacement
from urllib.error import HTTPError

# Assumed source of google_web_search: the 'googlesearch' package's search() function,
# which accepts a 'stop' keyword to limit the number of results.
from googlesearch import search as google_web_search

# Timestamp of the previous query, used to rate-limit requests to Google.
last = None


def google_search(query, limit):
    global last
    ret_url_list = list()

    for tries in range(1, 10):
        # Wait until roughly 60 seconds have passed since the previous query.
        try:
            if last:
                sleep(int(60 - (clock() - last)))
        except ValueError:
            # The remaining wait time was negative; no need to sleep.
            pass

        last = clock()

        try:
            # Keep only YouTube video URLs, stripped of extra query parameters.
            for url in google_web_search(query, stop=limit):
                if 'youtube.com/watch?v=' in url:
                    ret_url_list.append(url.split('&')[0])

        except KeyboardInterrupt:
            raise

        except HTTPError as e:
            print('Google search service unavailable.')

            if tries > 3:
                print('Failed to download Google search result. Reason: ' + str(e))
                raise

            print('Failed to download Google search result, retrying. Reason: ' + str(e))
            sleep(1)

        except Exception as e:
            if tries > 3:
                print('Failed to download Google search result. Reason: ' + str(e))
                raise

            print('Failed to download Google search result, retrying. Reason: ' + str(e))
            sleep(1)
        else:
            break

    return ret_url_list[:limit]
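
A minimal usage sketch, assuming google_web_search is wired to the 'googlesearch' package as above and that network access is available; the query string is only an illustration:

    if __name__ == '__main__':
        # Ask for up to five YouTube links for the query and print them.
        for video_url in google_search('lofi hip hop radio', 5):
            print(video_url)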
from typing import Dict, List

import requests
from bs4 import BeautifulSoup


def google_search(keywords: str) -> List[Dict[str, str]]:
    query = {'q': keywords}
    # Gets the results page
    page = requests.get('http://www.google.com/search', params=query)
    # Parses the page into BeautifulSoup
    soup = BeautifulSoup(page.text, "lxml")

    # Gets all anchors inside the main search results container
    anchors = soup.find(id='search').find_all('a')
    results = []

    for a in anchors:
        try:
            # Tries to get the href attribute of the anchor
            link = a['href']
        except KeyError:
            continue
        # Link must start with '/url?', as these are the search result links
        if not link.startswith('/url?'):
            continue
        # Makes sure a hidden 'Cached' result isn't included
        if a.text.strip() == 'Cached' and 'webcache.googleusercontent.com' in a['href']:
            continue
        # a.text: the title of the result page
        result = {'url': "https://www.google.com{}".format(link),
                  'name': a.text}
        results.append(result)
    return results
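
A quick way to try it, assuming requests, beautifulsoup4, and lxml are installed; note that Google may change its markup or block scripted requests, so results are not guaranteed:

    results = google_search('python web scraping tutorial')
    for r in results[:5]:
        print(r['name'], '->', r['url'])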
from bs4 import BeautifulSoup


class GoogleSearch:  # class name assumed; the snippet only shows the constructor
    def __init__(self, query='hello world', num=10, start=0, recent=None, site='', pages=1, sleep=True):
        # Join the query terms with '+' so they can be placed directly in a search URL
        self.query = '+'.join(query.split(' '))
        self.num = num          # number of results per page
        self.start = start      # index of the first result
        self.recent = recent    # optional recency filter for results
        self.site = site        # optional site restriction
        self.pages = pages      # number of result pages to fetch
        self.sleep = sleep      # whether to pause between requests
        self.headers = {'user-agent': 'Mozilla/5.0'}
        # Accumulates the parsed HTML of every fetched result page
        self.big_soup = BeautifulSoup("", 'html.parser')
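
A small construction example, assuming the wrapper class name GoogleSearch used above; it only exercises the attributes the constructor actually sets:

    gs = GoogleSearch(query='python beautifulsoup tutorial', num=10, pages=2, site='stackoverflow.com')
    print(gs.query)    # 'python+beautifulsoup+tutorial'
    print(gs.headers)  # {'user-agent': 'Mozilla/5.0'}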
