www1x.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 1x (Images)
"""

from lxml import html, etree
from urllib.parse import urlencode, urljoin

from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex

# about
about = {
    "website": 'https://1x.com/',
    "wikidata_id": None,
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['images']
paging = False

# search-url
base_url = 'https://1x.com'
search_url = base_url + '/backend/search.php?{query}'
gallery_url = 'https://gallery.1x.com/'


# do search-request
def request(query, params):
    params['url'] = search_url.format(query=urlencode({'q': query}))
    return params


# get response from search-request
def response(resp):
    results = []

    xmldom = etree.fromstring(resp.content)
    xmlsearchresult = eval_xpath_getindex(xmldom, '//searchresult', 0)
    dom = html.fragment_fromstring(xmlsearchresult.text, create_parent='div')

    for link in eval_xpath_list(dom, '/div/table/tr/td/div[2]//a'):
        url = urljoin(base_url, link.attrib.get('href'))
        title = extract_text(link)
        thumbnail_src = urljoin(gallery_url, eval_xpath_getindex(link, './/img', 0).attrib['src'])

        # append result
        results.append({'url': url,
                        'title': title,
                        'img_src': thumbnail_src,
                        'content': '',
                        'thumbnail_src': thumbnail_src,
                        'template': 'images.html'})

    # return results
    return results
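
Both hooks are plain functions, so they can be exercised outside of SearXNG. The snippet below is a minimal sketch of the request() side only; it assumes the file is importable as a module named `www1x`, and the params dict carries just the one key the engine actually writes rather than the full set SearXNG normally passes in.

# Hypothetical, hand-rolled check of the request() hook.
import www1x

params = www1x.request('forest', {'url': None})
print(params['url'])
# expected: https://1x.com/backend/search.php?q=forest

# response() would then be fed a requests-style object whose .content
# attribute holds the XML document returned by that backend URL; the
# engine pulls the <searchresult> node out of it and parses its text
# as an HTML fragment.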