# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Ahmia (Onions)
"""

from urllib.parse import urlencode, urlparse, parse_qs
from lxml.html import fromstring
from searx.engines.xpath import extract_url, extract_text, eval_xpath_list, eval_xpath

# about
about = {
    "website": 'http://juhanurmihxlp77nkq76byazcldy2hlmovfu2epvl5ankdibsot4csyd.onion',
    "wikidata_id": 'Q18693938',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine config
categories = ['onions']
paging = True
page_size = 10

# search url
search_url = 'http://juhanurmihxlp77nkq76byazcldy2hlmovfu2epvl5ankdibsot4csyd.onion/search/?{query}'
time_range_support = True
# the values are day counts handed to Ahmia's "d" query parameter in
# request() below, e.g. d=7 restricts results to the past week
time_range_dict = {'day': 1, 'week': 7, 'month': 30}

# xpaths
results_xpath = '//li[@class="result"]'
url_xpath = './h4/a/@href'
title_xpath = './h4/a[1]'
content_xpath = './/p[1]'
correction_xpath = '//*[@id="didYouMean"]//a'
number_of_results_xpath = '//*[@id="totalResults"]'
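
# the xpaths above assume result markup of roughly this shape (illustrative,
# reconstructed from the expressions rather than taken from Ahmia's pages):
#
#   <li class="result">
#     <h4><a href="...?redirect_url=http://example.onion/">title</a></h4>
#     <p>content snippet ...</p>
#   </li>
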

def request(query, params):
    """Assemble the Ahmia request URL, adding a day filter for time ranges."""
    params['url'] = search_url.format(query=urlencode({'q': query}))

    if params['time_range'] in time_range_dict:
        params['url'] += '&' + urlencode({'d': time_range_dict[params['time_range']]})

    return params
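
# illustrative example (inputs assumed): query='hidden wiki' with
# time_range='week' makes request() set params['url'] to
#   http://juhanurmihxlp77nkq76byazcldy2hlmovfu2epvl5ankdibsot4csyd.onion/search/?q=hidden+wiki&d=7
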

def response(resp):
    """Parse the Ahmia results page into searx result dicts."""
    results = []
    dom = fromstring(resp.text)

    # trim results so there aren't far too many at once: the request carries
    # no page parameter, so paging is emulated by slicing the full result list
    first_result_index = page_size * (resp.search_params.get('pageno', 1) - 1)
    all_results = eval_xpath_list(dom, results_xpath)
    trimmed_results = all_results[first_result_index : first_result_index + page_size]

    # get results
    for result in trimmed_results:
        # drop the Ahmia redirect wrapper and extract the result's actual url
        raw_url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
        cleaned_url = parse_qs(urlparse(raw_url).query).get('redirect_url', [''])[0]
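
        # illustrative (redirect path layout assumed): raw_url looks like
        #   .../search/redirect?redirect_url=http://example.onion/
        # and cleaned_url keeps only the redirect_url target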

        title = extract_text(eval_xpath(result, title_xpath))
        content = extract_text(eval_xpath(result, content_xpath))

        results.append({'url': cleaned_url, 'title': title, 'content': content, 'is_onion': True})

    # get spelling corrections
    for correction in eval_xpath_list(dom, correction_xpath):
        results.append({'correction': extract_text(correction)})

    # get number of results
    number_of_results = eval_xpath(dom, number_of_results_xpath)
    if number_of_results:
        try:
            results.append({'number_of_results': int(extract_text(number_of_results))})
        except ValueError:
            # the counter may be empty or non-numeric; skip it in that case
            pass

    return results
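
# a minimal sketch of enabling this engine in searx's settings.yml (keys
# assumed from typical engine entries, not taken from this file):
#
#   - name: ahmia
#     engine: ahmia
#     shortcut: ah
#     categories: onions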