import requests
import json
import time
from bs4 import BeautifulSoup
from http.client import IncompleteRead
from pathlib import Path

# --------------------------------------------------
# CONFIGURATION
# --------------------------------------------------

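# Pages to fetch: TracWater product pages plus a few related industry sites.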
MANUAL_URLS = [
    "https://www.tracwater.com",
    "https://www.tracwater.com/tracwater-sensors-tws",
    "https://www.tracwater.com/wirelessgroundwatersensors",
    "https://www.tracwater.com/wireless-transient-pressure-monitor",
    "https://www.tracwater.com/ijinus-wireless-pressure-and-flow-meters",
    "https://aquaanalytics.com.au/7-trends-for-smart-water-technology-in-australia-and-new-zealand/",
    "https://www.infoasaservice.com.au/home",
    "https://www.tracwater.com/ijinus-wireless-radar-level-sensor",
    "https://www.tracnet.com.au",
    "https://www.tracwater.com/wireless-underground-sewer-monitor"
]

TIMEOUT = 15  # seconds to wait for each HTTP request

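# Browser-like headers reduce the chance of being served a bot-blocking page.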
HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (X11; Linux x86_64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/120.0 Safari/537.36"
    ),
    "Accept-Language": "en-US,en;q=0.9",
    "Connection": "close"
}

# --------------------------------------------------
# HTML CLEANING
# --------------------------------------------------

def clean_html(soup: BeautifulSoup) -> str:
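    """Remove non-content tags and return the page's visible text as a single string."""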
    for tag in soup([
        "script", "style", "nav",
        "footer", "header", "noscript",
        "iframe", "svg"
    ]):
        tag.decompose()

    return " ".join(soup.stripped_strings)

# --------------------------------------------------
# MANUAL SCRAPER
# --------------------------------------------------

def scrape_manual_urls(urls):
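    """Fetch each URL, extract its cleaned text, and return a list of {"url", "text"} dicts."""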
    pages = []

    for url in urls:
        try:
            response = requests.get(
                url,
                headers=HEADERS,
                timeout=TIMEOUT
            )
            response.raise_for_status()

            soup = BeautifulSoup(response.text, "lxml")  # the "lxml" parser requires the lxml package
            text = clean_html(soup)

            if len(text) < 300:  # flag pages with suspiciously little extracted text
                print(f"[WARN] Low content but saved: {url}")

            pages.append({
                "url": url,
                "text": text
            })

            print(f"[OK] Scraped: {url}")
            time.sleep(0.5)  # brief pause between requests to avoid hammering the servers

        except (IncompleteRead, requests.exceptions.RequestException) as e:
            print(f"[ERROR] Failed to scrape {url} -> {e}")

    return pages


# --------------------------------------------------
# EXECUTION
# --------------------------------------------------

if __name__ == "__main__":
    data = scrape_manual_urls(MANUAL_URLS)

    # Write the raw scraped pages to a JSON file for later processing.
    OUTPUT_DIR = Path("_manual_scraped_data")
    OUTPUT_DIR.mkdir(exist_ok=True)

    output_file = OUTPUT_DIR / "manual_raw.json"
    with open(output_file, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)

    print("\n----------------------------------")
    print(f"Total URLs provided: {len(MANUAL_URLS)}")
    print(f"Total pages scraped: {len(data)}")
    print(f"Saved JSON to: {output_file}")
    print("----------------------------------\n")

    if data:
        print("Sample URL:", data[0]["url"])
        print("Sample Text (first 500 chars):\n")
        print(data[0]["text"][:500])
