-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathfirecrawlbasics.py
More file actions
153 lines (131 loc) · 5.6 KB
/
firecrawlbasics.py
File metadata and controls
153 lines (131 loc) · 5.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
from firecrawl import Firecrawl
import os
from urllib.parse import urlparse
import re
# Initialize the Firecrawl client.
# Security: read the API key from the environment so it never lands in source
# control; the placeholder is kept only as a backward-compatible fallback.
firecrawl = Firecrawl(api_key=os.environ.get("FIRECRAWL_API_KEY", "[REDACTED]"))

# Crawl a website (follows child links):
url = "https://docs.inductiveautomation.com/docs/8.1/appendix"
max_pages = 200  # Limit the number of pages to crawl
output_folder = "scraped_docs"  # Folder to save markdown files

# Create output folder if it doesn't exist
os.makedirs(output_folder, exist_ok=True)
def sanitize_filename(text, max_length=200):
    """Convert arbitrary text into a filesystem-safe filename.

    Characters that are illegal in filenames become dashes, surrounding
    spaces and dots are dropped, runs of whitespace/dashes collapse to a
    single dash, and the result is capped at *max_length* characters.
    """
    # Swap out characters that are invalid on common filesystems.
    safe = re.sub(r'[<>:"/\\|?*]', "-", text)
    # Drop surrounding spaces and dots (Windows rejects trailing dots).
    safe = safe.strip(" .")
    # Collapse any run of whitespace or dashes into one dash.
    safe = re.sub(r"[\s\-]+", "-", safe)
    # Enforce the length cap (a no-op slice when already short enough).
    return safe[:max_length]
def get_filename_from_url(url, title=None):
    """Generate a markdown filename from a URL, falling back to the title.

    Args:
        url: Source page URL.
        title: Optional page title, used only when the URL has no path.

    Returns:
        A sanitized filename ending in ".md". Never returns a bare ".md":
        when nothing usable survives sanitization (e.g. a URL of only
        slashes and no title), "index.md" is used instead.
    """
    parsed = urlparse(url)
    # Use path as base for filename
    path = parsed.path.strip("/")
    if path:
        # Flatten the path into a single name and drop any file extension.
        filename = path.replace("/", "-")
        filename = re.sub(r"\.[a-z]+$", "", filename, flags=re.IGNORECASE)
    else:
        # No path: fall back to the page title, then the domain.
        filename = title if title else parsed.netloc
    # Sanitize both branches — path segments can also carry unsafe characters.
    filename = sanitize_filename(filename)
    # Guard against an empty result so we never create a hidden ".md" file.
    if not filename:
        filename = "index"
    # Ensure it ends with .md
    if not filename.endswith(".md"):
        filename += ".md"
    return filename
print(f"Crawling: {url}")
print(f"Max pages: {max_pages}")
print(f"Output folder: {output_folder}")
print("This may take a moment as it follows links...")

try:
    # Use crawl instead of scrape to follow child links.
    print("\nStarting crawl...")
    result = firecrawl.crawl(
        url,
        limit=max_pages,  # Maximum number of pages to crawl
        crawl_entire_domain=True,  # Follow links across the entire domain (not just child paths)
        max_discovery_depth=3,  # Maximum depth to crawl
        # Path filters (optional - comment out if too restrictive):
        include_paths=["/docs/8.1/appendix"],  # Only crawl pages matching this path
        # exclude_paths=["/api/", "/admin/"],  # Exclude these paths
    )
    print("Crawl completed!")

    # Check if the crawl was successful
    if result and hasattr(result, "data"):
        pages = result.data if isinstance(result.data, list) else [result.data]
        print(f"\nCrawl Status: {result.status}")
        print(f"Total pages found: {result.total}")
        print(f"Successfully crawled {len(pages)} page(s)!")

        # Display summary and save each page.
        saved_files = []
        for i, page in enumerate(pages, 1):
            print(f"\n--- Page {i} ---")

            # Read metadata defensively: the attribute may be missing or None
            # (the original crashed on status_code when metadata was absent).
            metadata = getattr(page, "metadata", None)
            page_url = getattr(metadata, "url", None) if metadata else None
            page_title = getattr(metadata, "title", None) if metadata else None
            status_code = getattr(metadata, "status_code", None) if metadata else None

            print(f"URL: {page_url or 'N/A'}")
            print(f"Title: {page_title or 'N/A'}")
            print(f"Status: {status_code if status_code is not None else 'N/A'}")

            # Save markdown to file.
            markdown = getattr(page, "markdown", None)
            if markdown:
                filename = get_filename_from_url(page_url or url, page_title)
                filepath = os.path.join(output_folder, filename)

                # De-duplicate by appending a counter before the extension.
                # splitext (not str.replace) so an ".md" substring earlier in
                # the name is never clobbered.
                base_name, ext = os.path.splitext(filepath)
                counter = 1
                while os.path.exists(filepath):
                    filepath = f"{base_name}_{counter}{ext}"
                    counter += 1

                try:
                    with open(filepath, "w", encoding="utf-8") as f:
                        # Metadata header, then the scraped markdown body.
                        f.write(f"# {page_title or 'Untitled'}\n\n")
                        if page_url:
                            f.write(f"**Source URL:** {page_url}\n\n")
                        f.write("---\n\n")
                        f.write(markdown)
                    saved_files.append(filepath)
                    print(f"✓ Saved: {filepath}")
                    print(f"Markdown preview (first 200 chars): {markdown[:200]}...")
                except OSError as save_error:
                    # Report which file failed (was a useless "(unknown)").
                    print(f"✗ Error saving {filepath}: {save_error}")
            else:
                print("⚠ No markdown content to save")
            print()

        # Keep the page list around for any further processing.
        all_pages = pages
        print(f"\nTotal pages crawled: {len(all_pages)}")
        print(f"Total files saved: {len(saved_files)}")
        print(f"Files saved to: {os.path.abspath(output_folder)}")
    elif result:
        # Handle a single-page result object.
        print("\nCrawl completed!")
        if getattr(result, "markdown", None):
            print("\n--- Markdown Content (first 500 chars) ---")
            print(result.markdown[:500])
        print("\n--- Full Result Object ---")
        print(result)
    else:
        print("\nNo results returned from crawl.")
except Exception as e:
    # Top-level boundary: surface the error with a full traceback.
    print(f"\nError occurred: {e}")
    import traceback
    traceback.print_exc()