Change the file structure to match the commit subject

This commit is contained in:
2025-04-03 17:31:05 +02:00
parent 1507d5924b
commit 9e96789cfe
3 changed files with 0 additions and 0 deletions

18
hidden_robot/README.md Normal file
View File

@ -0,0 +1,18 @@
# Finding the Hidden Flag
This is a quick rundown of our school project where we hacked a website (in a controlled lab environment) to find a hidden flag.
## What We Did
- **Starting Point:**
We began at the `.hidden` directory, which was kept out of search engines by the `robots.txt` file.
- **How It Worked:**
We built a script that went through each subdirectory looking for a README file. It checked the last byte of the file to spot any unusual values. If the byte didn't match the normal pattern, we knew we had found the flag.
- **Speeding Things Up:**
We used multi-threading so that multiple pages could be checked at once, which made the process much faster.
## Takeaway
This project taught us how to combine directory traversal with threading and basic HTML parsing to uncover hidden information. Check out the repo for the full code!

1
hidden_robot/flag Normal file
View File

@ -0,0 +1 @@
d5eec3ec36cf80dce44a896f961c1831a05526ec215693c8f2c39543497d4466

View File

@ -0,0 +1,68 @@
import threading
import requests
import time
from bs4 import BeautifulSoup
# Base address of the target web server (controlled lab environment).
IP = "http://10.12.248.155/"
# Hidden directory kept out of search engines via robots.txt (see README).
BASE_URL = ".hidden/"
# Every spawned Request worker thread; main() joins all of them.
thread_pool = []
# Pages visited so far — shared across threads, used for the progress display.
visited_count = 0
# Flipped to True once a worker finds the flag; signals every worker to stop.
stop_thread = False
class Request(threading.Thread):
    """Worker thread that crawls one directory of the hidden tree.

    Each worker fetches ``IP + BASE_URL + url``, reads the README value shown
    in the directory listing's ``<pre>`` block, and either reports the flag
    (when the value does not start with the normal ``"34"`` pattern) or spawns
    one new ``Request`` per subdirectory link it finds.
    """

    # Serializes updates to the shared visited_count counter across workers.
    _count_lock = threading.Lock()

    def __init__(self, url=""):
        # Path of this worker's directory, relative to BASE_URL.
        self.url = url
        threading.Thread.__init__(self)

    def run(self):
        global thread_pool
        global visited_count
        global stop_thread
        if stop_thread:
            # Another worker already found the flag. Emptying the pool makes
            # main()'s join loop terminate instead of waiting on live workers.
            thread_pool.clear()
            # NOTE: exit(0) in a thread only raised SystemExit within that
            # thread; a plain return is the equivalent, clearer form.
            return
        # Timeout so a stalled server cannot hang this worker forever.
        res = requests.get(IP + BASE_URL + self.url, timeout=10)
        parsed = BeautifulSoup(res.content, "html.parser")
        pre = parsed.find("pre")
        if pre is None:
            # Page without a directory listing: nothing to inspect or recurse into.
            return
        readme_value = pre.contents[-1].split(" ")[-1]
        if not readme_value.startswith("34"):
            # Unusual value -> this directory's README holds the flag.
            stop_thread = True
            time.sleep(0.5)  # processing NSA hack
            print("\n\nFound the flag on: ", IP + BASE_URL + self.url)
            flag = requests.get(IP + BASE_URL + self.url + "README", timeout=10)
            print(flag.content.decode(), "\n")
            return
        for link in parsed.find_all("a"):
            url = link["href"]
            # Skip the README itself and the parent-directory link.
            if url != "README" and url != "../":
                if not stop_thread:
                    print("\r " + str(visited_count) + " " + self.url + url + " ", end="", flush=True)
                    new_thread = Request(self.url + url)
                    new_thread.start()
                    thread_pool.append(new_thread)
                    # Plain += on a shared int is not guaranteed atomic.
                    with self._count_lock:
                        visited_count += 1
def main():
    """Start the crawl at the hidden root and wait for every worker to finish."""
    root = Request()
    thread_pool.append(root)
    root.start()
    # thread_pool keeps growing while we iterate: Python list iteration
    # re-checks the length each step, so workers appended mid-loop are
    # joined as well.
    for worker in thread_pool:
        worker.join()


if __name__ == "__main__":
    main()