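"""Dig out links from a website and print any hrefs that look like file names.

Example invocation (the script file name here is illustrative):
    python scrape_links.py https://example.com/
"""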
import httplib2
import re
import argparse
from bs4 import BeautifulSoup


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Dig out links from a website.")
    parser.add_argument('site', type=str, help="Website that you want to scrape for links.")
    args = parser.parse_args()

    # Fetch the page; httplib2 caches responses under the .cache directory.
    h = httplib2.Http('.cache')
    response, content = h.request(args.site)

    # Parse the fetched HTML; naming the parser explicitly avoids bs4's
    # "no parser was explicitly specified" warning.
    s = BeautifulSoup(content, 'html.parser')

    # Find only hrefs that look like file names (i.e. contain a dot).
    links = s.find_all(href=re.compile(r'\..*$'))

    for link in links:
        print(args.site + link['href'])