"""
 Duden
 @website     https://www.duden.de
 @provide-api no
 @using-api   no
 @results     HTML (using search portal)
 @stable      no (HTML can change)
 @parse       url, title, content
"""

from lxml import html, etree
import re
from urllib.parse import quote, urljoin
from searx.utils import extract_text, eval_xpath
from searx import logger

categories = ['general']
paging = True
language_support = False

# search-url
base_url = 'https://www.duden.de/'
search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'


def request(query, params):
    '''pre-request callback

    params<dict>:
      method  : POST/GET
      headers : {}
      data    : {} # if method == POST
      url     : ''
      category: 'search category'
      pageno  : 1 # number of the requested page
    '''

    # the duden.de portal counts result pages from 0, searx from 1
    offset = params['pageno'] - 1

    if offset == 0:
        # the first page is served under the plain search path, without paging
        # parameters
        search_url_fmt = base_url + 'suchen/dudenonline/{query}'
        params['url'] = search_url_fmt.format(query=quote(query))
    else:
        params['url'] = search_url.format(offset=offset, query=quote(query))

    return params
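
# Illustrative example (not executed by searx): for the query 'laufen' and
# params = {'pageno': 2}, request() sets params['url'] to
# https://www.duden.de/suchen/dudenonline/laufen?search_api_fulltext=&page=1
# (the portal's page numbering starts at 0).  For pageno 1 only the plain
# https://www.duden.de/suchen/dudenonline/laufen is requested.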
					
						
def response(resp):
    '''post-response callback

    resp: requests response object
    '''
    results = []

    dom = html.fromstring(resp.text)

    try:
        # the pager link marked "active" carries the total number of hits in
        # its <span>; keep only the digits
        number_of_results_string = re.sub(
            '[^0-9]', '',
            eval_xpath(dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0])

        results.append({'number_of_results': int(number_of_results_string)})

    except Exception:
        logger.debug("Couldn't read number of results.")

    # every hit is rendered as a <section>; sections whose class contains
    # "essay" are skipped
    for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'):
        try:
            url = eval_xpath(result, './/h2/a')[0].get('href')
            url = urljoin(base_url, url)
            title = eval_xpath(result, 'string(.//h2/a)').strip()
            content = extract_text(eval_xpath(result, './/p'))

            # append result
            results.append({'url': url,
                            'title': title,
                            'content': content})
        except Exception:
            logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True))
            continue

    return results
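

if __name__ == '__main__':
    # Manual smoke test, sketch only: fetch one page of results for an example
    # query and print what response() parses out of it.  This assumes the
    # third-party 'requests' package is installed and duden.de is reachable;
    # searx itself never executes this block.
    import requests

    test_params = {'pageno': 1}
    request('laufen', test_params)
    print(response(requests.get(test_params['url'])))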