First commit

parent 449e44f899
commit b8da1eddc1

README.md (17 lines changed)
@@ -1,3 +1,18 @@
# domainshop

Gets the newest list of expired domains.

## About

Gets the latest list of expired domains from the Norwegian company [_domainnameshop_](https://domainname.shop/?currency=GBP&lang=en).

Pass a number (integer) as an argument to narrow the results to domain names of that length (excluding the ```.no``` part!). Without an argument the app returns all domains.

## Usage examples

* ```app.py``` -> Returns all domains
* ```app.py 5``` -> Returns only domains with 5 chars (+```.no```), e.g. 12345.no or abc12.no
* ```app.py 3 > list.txt``` -> Writes domains with 3 chars to the file list.txt. Using ```> file.txt``` overwrites an existing file; ```>> file.txt``` appends to an existing file or creates a new one.

## Disclaimer

This is just for fun! There is no guarantee that this app works in any way, shape or form ;-)
app.py (new file, 50 lines)
@@ -0,0 +1,50 @@
#!/usr/bin/python3
import sys

# Machine-specific path so Homebrew-installed packages are found.
sys.path.append("/opt/homebrew/lib/python3.11/site-packages")
import requests
from bs4 import BeautifulSoup

# Optional CLI argument: the desired domain name length (excluding ".no").
try:
    max_len = int(sys.argv[1])
except IndexError:
    max_len = None  # no argument given: return all domains


def cleanlist(my_list):
    # Keep only domains whose full name matches the requested length.
    # The +3 accounts for the ".no" suffix: max_len=5 matches "abc12.no" (8 chars).
    if max_len is None:
        return my_list
    ret_list = []
    for x in my_list:
        if len(x) == (max_len + 3):
            ret_list.append(x)
    return ret_list


def fetch():
    URL = "https://domene.shop/expired"
    page = requests.get(URL, timeout=30)  # fetch the expired-domains page
    return page.content


def parse():
    soup = BeautifulSoup(fetch(), "html.parser")
    web_links = soup.find_all("a")
    actual_web_links = [web_link["href"] for web_link in web_links if web_link.has_attr("href")]
    # Keep only links that point at a domain, then strip the URL prefix.
    new_list = [x for x in actual_web_links if "/?domain=" in x]
    final_list = [s.replace("/?domain=", "") for s in new_list]
    # Strip the "xn--" punycode prefix from internationalized domain names.
    final_list = [s.replace("xn--", "") for s in final_list]
    the_list = cleanlist(final_list)
    # Sort alphabetically first, then by length; sorted() is stable, so
    # equal-length domains stay in alphabetical order.
    the_list.sort()
    the_list = sorted(the_list, key=len)
    if the_list:
        print(*the_list, sep="\n")
    else:
        print("No expired domains with the length criteria you wanted!")


def main():
    parse()


if __name__ == "__main__":
    main()
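To sanity-check the length filter, here is a minimal sketch of what the filtering step does (the filter condition is restated inline rather than imported from ```app.py```, and the domain names are illustrative, not real listings):

```python
# With max_len = 5, the filter keeps names that are exactly
# 5 characters plus the 3-character ".no" suffix (8 in total).
max_len = 5
domains = ["ab.no", "12345.no", "abcdef.no"]
kept = [d for d in domains if len(d) == max_len + 3]
print(kept)  # -> ['12345.no']
```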