1 vote

I am trying to run an email harvester. When I enter the URL manually, without the loop, I do not get any connection errors.

import re
import requests
import requests.exceptions
from urllib.parse import urlsplit
from collections import deque
from bs4 import BeautifulSoup


def email_harvest(starting_url):
    print('this is the starting url: ' + starting_url)

    # a queue of urls to be crawled
    unprocessed_urls = deque([starting_url])

    # set of already crawled urls for email
    processed_urls = set()

    # a set of fetched emails
    emails = set()

    # process urls one by one from unprocessed_url queue until queue is empty
    while unprocessed_urls:

        # move next url from the queue to the set of processed urls
        url = unprocessed_urls.popleft()
        processed_urls.add(url)

        # extract base url to resolve relative links
        parts = urlsplit(url)
        base_url = "{0.scheme}://{0.netloc}".format(parts)
        path = url[:url.rfind('/')+1] if '/' in parts.path else url
        # get the url's content
        print("crawling %s" % url)
        try:
            response = requests.get(url)
            print (response.status_code)
        except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
            # ignore pages with errors and continue with next url
            print ("error crawing " % url)
            continue

        # extract all email addresses and add them into the resulting set
        # You may edit the regular expression as per your requirement
        new_emails = set(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", response.text, re.I))
        emails.update(new_emails)
        print(emails)
        # create a beautiful soup object for the html document
        soup = BeautifulSoup(response.text, 'lxml')

        # once this document is parsed, find and process all the anchors (linked urls) in it
        for anchor in soup.find_all("a"):
            # extract link url from the anchor
            link = anchor.attrs["href"] if "href" in anchor.attrs else ''
            # resolve relative links (starting with /)
            if link.startswith('/'):
                link = base_url + link
            elif not link.startswith('http'):
                link = path + link
            # add the new url to the queue if it was not in unprocessed list nor in processed list yet
            if link not in unprocessed_urls and link not in processed_urls:
                unprocessed_urls.append(link)


website_url = tuple(open('text.txt', 'r'))
i = 0
while i < len(website_url):
    print(i)
    starting_url = 'http://' + website_url[i]
    email_harvest(starting_url)
    i += 1

However, when I load the URLs from a file, I get the error below: 'Name or service not known'.

Traceback (most recent call last):
  File "/usr/lib/python3/dist-packages/urllib3/connection.py", line 141, in _new_conn
    (self.host, self.port), self.timeout, **extra_kw)
  File "/usr/lib/python3/dist-packages/urllib3/util/connection.py", line 60, in create_connection
    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
  File "/usr/lib/python3.6/socket.py", line 745, in getaddrinfo
    for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
socket.gaierror: [Errno -2] Name or service not known

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 601, in urlopen
    chunked=chunked)
  File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 357, in _make_request
    conn.request(method, url, **httplib_request_kw)
  File "/usr/lib/python3.6/http/client.py", line 1254, in request
    self._send_request(method, url, body, headers, encode_chunked)
  File "/usr/lib/python3.6/http/client.py", line 1300, in _send_request
    self.endheaders(body, encode_chunked=encode_chunked)
  File "/usr/lib/python3.6/http/client.py", line 1249, in endheaders
    self._send_output(message_body, encode_chunked=encode_chunked)
  File "/usr/lib/python3.6/http/client.py", line 1036, in _send_output
    self.send(msg)
  File "/usr/lib/python3.6/http/client.py", line 974, in send
    self.connect()
  File "/usr/lib/python3/dist-packages/urllib3/connection.py", line 166, in connect
    conn = self._new_conn()
  File "/usr/lib/python3/dist-packages/urllib3/connection.py", line 150, in _new_conn
    self, "Failed to establish a new connection: %s" % e)
urllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno -2] Name or service not known

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/usr/local/lib/python3.6/dist-packages/requests/adapters.py", line 449, in send
    timeout=timeout
  File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 639, in urlopen
    _stacktrace=sys.exc_info()[2])
  File "/usr/lib/python3/dist-packages/urllib3/util/retry.py", line 398, in increment
    raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='www.miet.ac.in%0a', port=80): Max retries exceeded with url: / (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -2] Name or service not known',))

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "editog.py", line 39, in email_harvest
    response = requests.get(url)
  File "/usr/local/lib/python3.6/dist-packages/requests/api.py", line 75, in get
    return request('get', url, params=params, **kwargs)
  File "/usr/local/lib/python3.6/dist-packages/requests/api.py", line 60, in request
    return session.request(method=method, url=url, **kwargs)
  File "/usr/local/lib/python3.6/dist-packages/requests/sessions.py", line 533, in request
    resp = self.send(prep, **send_kwargs)
  File "/usr/local/lib/python3.6/dist-packages/requests/sessions.py", line 646, in send
    r = adapter.send(request, **kwargs)
  File "/usr/local/lib/python3.6/dist-packages/requests/adapters.py", line 516, in send
    raise ConnectionError(e, request=request)
requests.exceptions.ConnectionError: HTTPConnectionPool(host='www.miet.ac.in%0a', port=80): Max retries exceeded with url: / (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -2] Name or service not known',))

Note:

  1. I'm not behind any proxy, and there is no filtering.
  2. My internet connection is stable.
Comments:

  - Try using --network host on your container; if that helps, you may change the container's DNS settings. – LinPy
  - Nope, that did not work. – Charm_quark
  - Use Chrome or Firefox devtools to track the HTTP request. Check the HTTP method (GET/POST) and what the request's data and headers contain. Sometimes input params are split between data and headers. If the request is a GET, you may need to set or append headers. – drt

2 Answers

0 votes

host='www.miet.ac.in%0a', port=80

The problem is with how you build the URL string: the %0a in the host is a URL-encoded newline, left over from the line you read out of the file.
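
A minimal sketch of one way to fix it, assuming text.txt holds one hostname per line: strip the whitespace from each line before building the URL.

# read one hostname per line, dropping the trailing newline that
# open() leaves on each line -- that newline is what becomes %0a
with open('text.txt') as f:
    website_urls = [line.strip() for line in f if line.strip()]

for host in website_urls:
    email_harvest('http://' + host)

With the newline stripped, requests can resolve www.miet.ac.in normally.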

2 votes

It looks like the connection is being attempted against an invalid URL.

HTTPConnectionPool(host='www.miet.ac.in%0a', port=80)

Is this URL ('www.miet.ac.in%0a') valid? I was able to access 'www.miet.ac.in', but not 'www.miet.ac.in%0a'.
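
The %0a is a URL-encoded newline, which suggests the lines read from the file still carry their trailing '\n'. A small sketch to illustrate (the file name is taken from the question):

lines = tuple(open('text.txt', 'r'))
print(repr(lines[0]))               # e.g. 'www.miet.ac.in\n'
print(repr('http://' + lines[0]))   # 'http://www.miet.ac.in\n'
# requests percent-encodes the stray newline, which is how the
# host 'www.miet.ac.in%0a' in the traceback comes about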

If it is valid, can you also show what you did without the loop?