5 examples of how to extract data from an HTML file using Python

Every line of these 'how to extract data from an HTML file using Python' code snippets is scanned for vulnerabilities by our powerful machine learning engine, which combs millions of open source libraries to help ensure your Python code is secure.

All examples are scanned by Snyk Code

By copying the Snyk Code Snippets, you agree to this disclaimer.
def soup_extract(file):
    """Return the plain text of an HTML document, markup stripped.

    :param file: a readable file-like object; its entire contents are
        consumed via ``file.read()``.
    :returns: the document's text content as a string.
    """
    # Name the parser explicitly: omitting it triggers bs4's
    # GuessedAtParserWarning and makes output depend on which parser
    # happens to be installed. "html.parser" is always available.
    return BeautifulSoup(file.read(), "html.parser").get_text()
Important

Use secure code every time

Secure your code as it's written. Use Snyk Code to scan source code in minutes – no build needed – and fix issues immediately. Enable Snyk Code

def main():
    """Post-process a chunked HTML file in place.

    Parses the file named on the command line with html5lib, remaps
    element ids and hrefs via the module-level ``map_ids`` /
    ``link_list`` / ``link_remap``, tags each blockquote with a class
    taken from its first <strong> child, then overwrites the original
    file with the modified markup.
    """
    # setup command line arguments
    parser = argparse.ArgumentParser(description='Postprocessor for generating'
                                     ' chunked html files.')

    parser.add_argument('FILENAME', help='Input file name (or directory if '
                        'watching)')

    args = parser.parse_args()

    html_doc = args.FILENAME

    with open(html_doc) as fp:
        soup = BeautifulSoup(fp, "html5lib")

    # perform the id mapping
    map_ids(soup)

    # update any modified links
    for anchor in soup.find_all(href=link_list):
        original = anchor['href']
        anchor['href'] = link_remap[original]

    # add class to the blockquotes/asides; only the first <strong>
    # inside each blockquote supplies the class name
    for quote in soup.find_all('blockquote'):
        for strong in quote.find_all('strong'):
            quote['class'] = strong.string
            break

    # overwrite the original file; the context manager guarantees the
    # handle is closed (and buffers flushed) even if str(soup) raises
    with open(html_doc, 'w') as out:
        out.write(str(soup))
def __call__(self, data):
    """Tokenize the HTML file referenced by ``data[FILE_KEY]``.

    Gathers the text of every node matching ``self.xpath``, splits it
    with ``self.pattern``, applies ``self.transform`` to each raw
    token, and stores the resulting list under ``data[self.target_key]``.

    Any error while reading or parsing is logged and swallowed so that
    a single bad file does not abort the pipeline; in that case ``data``
    is returned without the tokens key.

    :param data: mutable mapping carrying the pipeline state.
    :returns: the same ``data`` mapping, possibly with tokens added.
    """
    filename = data[FILE_KEY]
    try:
        with open(filename) as fh:
            tree = parse_html(fh)
            text_iter = chain.from_iterable(
                [node.itertext() for node in tree.xpath(self.xpath)])
            raw_tokens = chain.from_iterable(
                [self.pattern.findall(text) for text in text_iter])
            tokens = list(map(self.transform, raw_tokens))
            data[self.target_key] = tokens
    except Exception as ex:
        # Logger.warn is a deprecated alias; Logger.warning is the
        # supported spelling.
        LOGGER.warning('reading %s => %s', filename, ex)

    return data
def get_html(url):
    """Fetch *url* in an off-screen QtWebEngine page and return its HTML.

    Blocks until the page has finished loading (plus a 5-second settle
    timer) by pumping the Qt event loop manually. Imports are local so
    PyQt5 is only required when this function is actually called.

    :param url: the URL to load.
    :returns: the rendered page's HTML as a string.
    """
    from PyQt5.QtCore import QUrl, QTimer
    from PyQt5.QtWidgets import QApplication
    from PyQt5.QtWebEngineWidgets import QWebEnginePage

    class ExtractorHtml:
        def __init__(self, url):
            _app = QApplication([])
            self._page = QWebEnginePage()

            # Filled in by _callable once toHtml() delivers the markup.
            self.html = None

            # Small workaround for retrieving the page contents of
            # https://www.origin.com/rus/ru-ru/search?searchString=
            # The page loads progressively -- its data does not appear
            # immediately, so we must wait for loading to finish. A
            # single-shot timer allows 5 seconds after each completed
            # load before the data is pulled out of the page.
            timer = QTimer()
            timer.setSingleShot(True)
            timer.setInterval(5000)
            timer.timeout.connect(self._load_finished_handler)

            # (Re)start the settle timer each time progress hits 100%.
            self._page.loadProgress.connect(lambda x: x == 100 and timer.start())

            self._page.load(QUrl(url))

            # Wait for the page to load and its contents to arrive.
            # This loop turns the asynchronous code synchronous.
            while self.html is None:
                _app.processEvents()

            _app.quit()

            self._page = None

        def _callable(self, data):
            # Callback handed to QWebEnginePage.toHtml().
            self.html = data

        def _load_finished_handler(self):
            # Settle timer expired: ask the page for its HTML.
            self._page.toHtml(self._callable)

    return ExtractorHtml(url).html
def parse(filename='example.html'):
    """Load *filename*, extract its table cells, and print one row.

    Relies on the module-level helpers ``loadpage``, ``getcells`` and
    ``makerow``.

    :param filename: path of the HTML file to parse.
    """
    xml = loadpage(filename)
    cells = getcells(xml)
    row = makerow(cells)
    # Use the function form of print: the Python 2 statement form
    # ('print row') is a SyntaxError under Python 3.
    print(row)

Related snippets