Upload folder using huggingface_hub
- Crawler/Clients/Client_request.py +82 -0
- Crawler/Clients/mcpso_client_detail_request.py +142 -0
- Crawler/Clients/mcpso_clients.json +0 -0
- Crawler/Servers/Server_request.py +82 -0
- Crawler/Servers/mcpso_server_detail_request.py +142 -0
- Crawler/Servers/mcpso_servers.json +0 -0
- Crawler/data_cleaner.py +102 -0
- Crawler/github_info_collector.py +157 -0
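Taken together, these files look like a small collection pipeline: the listing crawlers gather hrefs, the detail crawlers enrich them, the GitHub collector adds repository statistics, and the cleaner reduces each record to its core fields. The driver below is only a sketch of that assumed order for the server half; the working directories, the check=True flags, and the rename of mcpso_all_hrefs.json to mcpso_servers.json (which the detail step expects but the commit never shows) are all inferences, not part of the upload.

import shutil
import subprocess
import sys

# 1. Collect /server/... hrefs from the paginated listing (writes mcpso_all_hrefs.json).
subprocess.run([sys.executable, "Server_request.py"], cwd="Crawler/Servers", check=True)
# 2. Assumed rename so the detail crawler finds its input file.
shutil.move("Crawler/Servers/mcpso_all_hrefs.json", "Crawler/Servers/mcpso_servers.json")
# 3. Enrich each href with name/url/metadata from the detail pages.
subprocess.run([sys.executable, "mcpso_server_detail_request.py"], cwd="Crawler/Servers", check=True)
# 4. Attach GitHub statistics for entries whose url points at github.com.
subprocess.run([sys.executable, "../github_info_collector.py", "--file", "mcpso_servers.json"],
               cwd="Crawler/Servers", check=True)
# 5. Keep only the core fields and drop records without a url (writes mcpso_servers_cleaned.json).
subprocess.run([sys.executable, "../data_cleaner.py"], cwd="Crawler/Servers", check=True)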
Crawler/Clients/Client_request.py
ADDED
@@ -0,0 +1,82 @@
import requests
import re
import time
import os
import json

RETRY = 3
SLEEP_BETWEEN_RETRY = 2

def extract_hrefs(response_text):
    # Extract all hrefs
    return re.findall(r'"href":"(/client/[^"]+)"', response_text)

def main():
    output_file = "mcpso_all_hrefs.json"
    visited = set()
    href_list = []
    # Resume: load already saved hrefs
    if os.path.exists(output_file):
        with open(output_file, "r", encoding="utf-8") as f:
            try:
                href_list = json.load(f)
                for item in href_list:
                    visited.add(item["href"])
            except Exception:
                pass

    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cookie": "Your Cookie",
        "Next-Url": "/en/servers",
        "Priority": "u=1, i",
        "Referer": "https://mcp.so/servers",
        "Rsc": "1",
        "Sec-Ch-Ua": '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
        "Sec-Ch-Ua-Arch": "arm",
        "Sec-Ch-Ua-Bitness": "64",
        "Sec-Ch-Ua-Full-Version": "136.0.7103.114",
        "Sec-Ch-Ua-Full-Version-List": '"Chromium";v="136.0.7103.114", "Google Chrome";v="136.0.7103.114", "Not.A/Brand";v="99.0.0.0"',
        "Sec-Ch-Ua-Mobile": "?0",
        "Sec-Ch-Ua-Model": '""',
        "Sec-Ch-Ua-Platform": '"macOS"',
        "Sec-Ch-Ua-Platform-Version": '"15.3.0"',
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin"
    }

    for page in range(1, 477):
        url = f"https://mcp.so/clients?page={page}"
        print(f"Requesting page {page}: {url}")
        for attempt in range(1, RETRY + 1):
            try:
                resp = requests.get(url, headers=headers, timeout=30)
                if resp.status_code != 200:
                    print(f"Page {page} failed: HTTP {resp.status_code}, attempt {attempt}/{RETRY}")
                    time.sleep(SLEEP_BETWEEN_RETRY)
                    continue
                hrefs = extract_hrefs(resp.text)
                new_hrefs = [h for h in hrefs if h not in visited]
                for h in new_hrefs:
                    href_list.append({"href": h})
                    visited.add(h)
                # Save in real time
                with open(output_file, "w", encoding="utf-8") as f:
                    json.dump(href_list, f, ensure_ascii=False, indent=2)
                print(f"Page {page} got {len(new_hrefs)} new, total {len(href_list)}")
                time.sleep(1)
                break
            except Exception as e:
                print(f"Page {page} exception: {e}, attempt {attempt}/{RETRY}")
                time.sleep(SLEEP_BETWEEN_RETRY)
                continue
        else:
            print(f"Page {page} failed after {RETRY} retries.")
    print(f"All done. Total unique hrefs: {len(href_list)}. Saved to {output_file}")

if __name__ == "__main__":
    main()
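For reference, extract_hrefs works on the raw RSC text returned by the paginated listing endpoint rather than on parsed JSON. The fragment below is invented, but it has the shape the regex expects:

import re

# Hypothetical slice of an RSC payload; real responses are much larger.
sample = '{"title":"Example Client","href":"/client/example-client/acme"},{"title":"Other","href":"/client/other-client/foo"}'
print(re.findall(r'"href":"(/client/[^"]+)"', sample))
# ['/client/example-client/acme', '/client/other-client/foo']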
Crawler/Clients/mcpso_client_detail_request.py
ADDED
@@ -0,0 +1,142 @@
import requests
import re
import json
import time
import os

def extract_current_project(text):
    # 1. Locate currentProject start position
    key = '"currentProject":'
    start = text.find(key)
    if start == -1:
        print("currentProject not found!")
        return None
    start = start + len(key)
    # 2. Starting from the first {, use bracket counting method to find matching }
    while start < len(text) and text[start] not in '{':
        start += 1
    if start == len(text):
        print("currentProject JSON start not found!")
        return None
    brace_count = 0
    end = start
    for i, c in enumerate(text[start:]):
        if c == '{':
            brace_count += 1
        elif c == '}':
            brace_count -= 1
            if brace_count == 0:
                end = start + i + 1
                break
    json_str = text[start:end]
    try:
        profile = json.loads(json_str)
        return profile
    except Exception as e:
        print(f"JSON decode error: {e}")
        return None

def request_server_detail(url, headers):
    try:
        resp = requests.get(url, headers=headers, timeout=30)
        print(f"Status code: {resp.status_code} for {url}")
        if resp.status_code == 200:
            profile = extract_current_project(resp.text)
            return profile
        else:
            print(f"Failed to get detail: HTTP {resp.status_code}")
            return None
    except Exception as e:
        print(f"Exception: {e}")
        return None

def batch_request_servers():
    # Read mcpso_clients.json
    servers_path = os.path.join(os.path.dirname(__file__), 'mcpso_clients.json')
    with open(servers_path, 'r', encoding='utf-8') as f:
        servers = json.load(f)
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cookie": "Your Cookie",
        "Next-Url": "/en/servers",
        "Priority": "u=1, i",
        "Referer": "https://mcp.so/servers",
        "Rsc": "1",
        "Sec-Ch-Ua": '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
        "Sec-Ch-Ua-Arch": "arm",
        "Sec-Ch-Ua-Bitness": "64",
        "Sec-Ch-Ua-Full-Version": "136.0.7103.114",
        "Sec-Ch-Ua-Full-Version-List": '"Chromium";v="136.0.7103.114", "Google Chrome";v="136.0.7103.114", "Not.A/Brand";v="99.0.0.0"',
        "Sec-Ch-Ua-Mobile": "?0",
        "Sec-Ch-Ua-Model": '""',
        "Sec-Ch-Ua-Platform": '"macOS"',
        "Sec-Ch-Ua-Platform-Version": '"15.3.0"',
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin"
    }
    for idx, item in enumerate(servers):
        # Skip already collected items (already have name field)
        if 'name' in item and 'metadata' in item:
            continue
        href = item.get('href')
        if not href:
            continue
        detail_url = f"https://mcp.so{href}"
        print(f"Requesting: {detail_url}")
        profile = request_server_detail(detail_url, headers)
        if not profile:
            print(f"Skip {href} due to extraction failure.")
            continue
        name = profile.get('name')
        url = profile.get('url')
        metadata = profile.copy()
        metadata.pop('name', None)
        metadata.pop('url', None)
        item['name'] = name
        item['url'] = url
        item['metadata'] = metadata
        # Write back in real time
        with open(servers_path, 'w', encoding='utf-8') as f:
            json.dump(servers, f, ensure_ascii=False, indent=2)
        print(f"Updated {idx+1}/{len(servers)}: {name}")
        time.sleep(1)
    print(f"All servers updated in {servers_path}")

if __name__ == "__main__":
    # # Single collection (original logic)
    # url = "https://mcp.so/server/zhipu-web-search/BigModel?_rsc=n713a"
    # headers = {
    #     "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
    #     "Accept": "*/*",
    #     "Accept-Encoding": "gzip, deflate, br, zstd",
    #     "Accept-Language": "zh-CN,zh;q=0.9",
    #     "Next-Url": "/en/server/zhipu-web-search/BigModel",
    #     "Priority": "u=1, i",
    #     "Referer": "https://mcp.so/server/zhipu-web-search/BigModel",
    #     "Rsc": "1",
    #     "Sec-Ch-Ua": '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
    #     "Sec-Ch-Ua-Arch": "arm",
    #     "Sec-Ch-Ua-Bitness": "64",
    #     "Sec-Ch-Ua-Full-Version": "136.0.7103.114",
    #     "Sec-Ch-Ua-Full-Version-List": '"Chromium";v="136.0.7103.114", "Google Chrome";v="136.0.7103.114", "Not.A/Brand";v="99.0.0.0"',
    #     "Sec-Ch-Ua-Mobile": "?0",
    #     "Sec-Ch-Ua-Model": '""',
    #     "Sec-Ch-Ua-Platform": '"macOS"',
    #     "Sec-Ch-Ua-Platform-Version": '"15.3.0"',
    #     "Sec-Fetch-Dest": "empty",
    #     "Sec-Fetch-Mode": "cors",
    #     "Sec-Fetch-Site": "same-origin"
    # }
    # profile = request_server_detail(url, headers)
    # if profile:
    #     with open("server_zhipu-web-search_BigModel_profile.json", "w", encoding="utf-8") as f:
    #         json.dump(profile, f, ensure_ascii=False, indent=2)
    #     print("Profile saved to server_zhipu-web-search_BigModel_profile.json")
    # else:
    #     print("Profile extraction failed!")
    # Batch collection
    batch_request_servers()
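The detail pages embed the record as a "currentProject" object somewhere inside a large RSC payload, so extract_current_project slices it out by counting braces instead of parsing the whole response; this assumes no stray braces appear inside string values. A synthetic payload (values invented, run in the same module as the function above) illustrates the idea:

payload = 'noise {"state":{"currentProject":{"name":"example-client","url":"https://github.com/acme/example-client","config":{"type":"stdio"}},"other":1}} noise'
profile = extract_current_project(payload)
print(profile["name"])    # example-client
print(profile["config"])  # {'type': 'stdio'}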
Crawler/Clients/mcpso_clients.json
ADDED
The diff for this file is too large to render.
See raw diff
Crawler/Servers/Server_request.py
ADDED
@@ -0,0 +1,82 @@
import requests
import re
import time
import os
import json

RETRY = 3
SLEEP_BETWEEN_RETRY = 2

def extract_hrefs(response_text):
    # Extract all hrefs
    return re.findall(r'"href":"(/server/[^"]+)"', response_text)

def main():
    output_file = "mcpso_all_hrefs.json"
    visited = set()
    href_list = []
    # Resume: load already saved hrefs
    if os.path.exists(output_file):
        with open(output_file, "r", encoding="utf-8") as f:
            try:
                href_list = json.load(f)
                for item in href_list:
                    visited.add(item["href"])
            except Exception:
                pass

    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cookie": "Your Cookie",
        "Next-Url": "/en/servers",
        "Priority": "u=1, i",
        "Referer": "https://mcp.so/servers",
        "Rsc": "1",
        "Sec-Ch-Ua": '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
        "Sec-Ch-Ua-Arch": "arm",
        "Sec-Ch-Ua-Bitness": "64",
        "Sec-Ch-Ua-Full-Version": "136.0.7103.114",
        "Sec-Ch-Ua-Full-Version-List": '"Chromium";v="136.0.7103.114", "Google Chrome";v="136.0.7103.114", "Not.A/Brand";v="99.0.0.0"',
        "Sec-Ch-Ua-Mobile": "?0",
        "Sec-Ch-Ua-Model": '""',
        "Sec-Ch-Ua-Platform": '"macOS"',
        "Sec-Ch-Ua-Platform-Version": '"15.3.0"',
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin"
    }

    for page in range(1, 477):
        url = f"https://mcp.so/servers?page={page}"
        print(f"Requesting page {page}: {url}")
        for attempt in range(1, RETRY + 1):
            try:
                resp = requests.get(url, headers=headers, timeout=30)
                if resp.status_code != 200:
                    print(f"Page {page} failed: HTTP {resp.status_code}, attempt {attempt}/{RETRY}")
                    time.sleep(SLEEP_BETWEEN_RETRY)
                    continue
                hrefs = extract_hrefs(resp.text)
                new_hrefs = [h for h in hrefs if h not in visited]
                for h in new_hrefs:
                    href_list.append({"href": h})
                    visited.add(h)
                # Save in real time
                with open(output_file, "w", encoding="utf-8") as f:
                    json.dump(href_list, f, ensure_ascii=False, indent=2)
                print(f"Page {page} got {len(new_hrefs)} new, total {len(href_list)}")
                time.sleep(1)
                break
            except Exception as e:
                print(f"Page {page} exception: {e}, attempt {attempt}/{RETRY}")
                time.sleep(SLEEP_BETWEEN_RETRY)
                continue
        else:
            print(f"Page {page} failed after {RETRY} retries.")
    print(f"All done. Total unique hrefs: {len(href_list)}. Saved to {output_file}")

if __name__ == "__main__":
    main()
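Because the crawler rewrites mcpso_all_hrefs.json after every page and reloads it on start-up, an interrupted run can simply be restarted: previously saved hrefs seed the visited set and only unseen ones are appended. A minimal sketch of that resume logic (file contents invented):

import json

# Pretend a previous run already saved two server hrefs.
with open("mcpso_all_hrefs.json", "w", encoding="utf-8") as f:
    json.dump([{"href": "/server/foo/acme"}, {"href": "/server/bar/acme"}], f)

# Mirrors the resume block at the top of main().
with open("mcpso_all_hrefs.json", "r", encoding="utf-8") as f:
    href_list = json.load(f)
visited = {item["href"] for item in href_list}

# A page returning one old and one new href grows the list by exactly one entry.
page_hrefs = ["/server/foo/acme", "/server/baz/acme"]
print([h for h in page_hrefs if h not in visited])  # ['/server/baz/acme']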
Crawler/Servers/mcpso_server_detail_request.py
ADDED
@@ -0,0 +1,142 @@
import requests
import re
import json
import time
import os

def extract_current_project(text):
    # 1. Locate currentProject start position
    key = '"currentProject":'
    start = text.find(key)
    if start == -1:
        print("currentProject not found!")
        return None
    start = start + len(key)
    # 2. Starting from the first {, use bracket counting method to find matching }
    while start < len(text) and text[start] not in '{':
        start += 1
    if start == len(text):
        print("currentProject JSON start not found!")
        return None
    brace_count = 0
    end = start
    for i, c in enumerate(text[start:]):
        if c == '{':
            brace_count += 1
        elif c == '}':
            brace_count -= 1
            if brace_count == 0:
                end = start + i + 1
                break
    json_str = text[start:end]
    try:
        profile = json.loads(json_str)
        return profile
    except Exception as e:
        print(f"JSON decode error: {e}")
        return None

def request_server_detail(url, headers):
    try:
        resp = requests.get(url, headers=headers, timeout=30)
        print(f"Status code: {resp.status_code} for {url}")
        if resp.status_code == 200:
            profile = extract_current_project(resp.text)
            return profile
        else:
            print(f"Failed to get detail: HTTP {resp.status_code}")
            return None
    except Exception as e:
        print(f"Exception: {e}")
        return None

def batch_request_servers():
    # Read mcpso_servers.json
    servers_path = os.path.join(os.path.dirname(__file__), 'mcpso_servers.json')
    with open(servers_path, 'r', encoding='utf-8') as f:
        servers = json.load(f)
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cookie": "Your Cookie",
        "Next-Url": "/en/server/zhipu-web-search/BigModel",
        "Priority": "u=1, i",
        "Referer": "https://mcp.so/server/zhipu-web-search/BigModel",
        "Rsc": "1",
        "Sec-Ch-Ua": '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
        "Sec-Ch-Ua-Arch": "arm",
        "Sec-Ch-Ua-Bitness": "64",
        "Sec-Ch-Ua-Full-Version": "136.0.7103.114",
        "Sec-Ch-Ua-Full-Version-List": '"Chromium";v="136.0.7103.114", "Google Chrome";v="136.0.7103.114", "Not.A/Brand";v="99.0.0.0"',
        "Sec-Ch-Ua-Mobile": "?0",
        "Sec-Ch-Ua-Model": '""',
        "Sec-Ch-Ua-Platform": '"macOS"',
        "Sec-Ch-Ua-Platform-Version": '"15.3.0"',
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin"
    }
    for idx, item in enumerate(servers):
        # Skip already collected items (already have name field)
        if 'name' in item and 'metadata' in item:
            continue
        href = item.get('href')
        if not href:
            continue
        detail_url = f"https://mcp.so{href}"
        print(f"Requesting: {detail_url}")
        profile = request_server_detail(detail_url, headers)
        if not profile:
            print(f"Skip {href} due to extraction failure.")
            continue
        name = profile.get('name')
        url = profile.get('url')
        metadata = profile.copy()
        metadata.pop('name', None)
        metadata.pop('url', None)
        item['name'] = name
        item['url'] = url
        item['metadata'] = metadata
        # Write back in real time
        with open(servers_path, 'w', encoding='utf-8') as f:
            json.dump(servers, f, ensure_ascii=False, indent=2)
        print(f"Updated {idx+1}/{len(servers)}: {name}")
        time.sleep(1)
    print(f"All servers updated in {servers_path}")

if __name__ == "__main__":
    # # Single collection (original logic)
    # url = "https://mcp.so/server/zhipu-web-search/BigModel?_rsc=n713a"
    # headers = {
    #     "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
    #     "Accept": "*/*",
    #     "Accept-Encoding": "gzip, deflate, br, zstd",
    #     "Accept-Language": "zh-CN,zh;q=0.9",
    #     "Next-Url": "/en/server/zhipu-web-search/BigModel",
    #     "Priority": "u=1, i",
    #     "Referer": "https://mcp.so/server/zhipu-web-search/BigModel",
    #     "Rsc": "1",
    #     "Sec-Ch-Ua": '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
    #     "Sec-Ch-Ua-Arch": "arm",
    #     "Sec-Ch-Ua-Bitness": "64",
    #     "Sec-Ch-Ua-Full-Version": "136.0.7103.114",
    #     "Sec-Ch-Ua-Full-Version-List": '"Chromium";v="136.0.7103.114", "Google Chrome";v="136.0.7103.114", "Not.A/Brand";v="99.0.0.0"',
    #     "Sec-Ch-Ua-Mobile": "?0",
    #     "Sec-Ch-Ua-Model": '""',
    #     "Sec-Ch-Ua-Platform": '"macOS"',
    #     "Sec-Ch-Ua-Platform-Version": '"15.3.0"',
    #     "Sec-Fetch-Dest": "empty",
    #     "Sec-Fetch-Mode": "cors",
    #     "Sec-Fetch-Site": "same-origin"
    # }
    # profile = request_server_detail(url, headers)
    # if profile:
    #     with open("server_zhipu-web-search_BigModel_profile.json", "w", encoding="utf-8") as f:
    #         json.dump(profile, f, ensure_ascii=False, indent=2)
    #     print("Profile saved to server_zhipu-web-search_BigModel_profile.json")
    # else:
    #     print("Profile extraction failed!")
    # Batch collection
    batch_request_servers()
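After this pass, each resolvable entry in mcpso_servers.json gains three keys: name and url taken from the extracted profile, with every remaining profile field tucked under metadata. A hypothetical record before and after (all values invented):

before = {"href": "/server/example-server/acme"}

after = {
    "href": "/server/example-server/acme",
    "name": "example-server",                         # profile['name']
    "url": "https://github.com/acme/example-server",  # profile['url']
    "metadata": {                                     # everything else from the profile
        "id": 123,
        "title": "Example Server",
        "description": "Does example things.",
        "author_name": "acme",
        "tags": "example,demo",
        "category": "dev-tools",
    },
}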
Crawler/Servers/mcpso_servers.json
ADDED
The diff for this file is too large to render.
See raw diff
Crawler/data_cleaner.py
ADDED
@@ -0,0 +1,102 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MCP Servers Data Cleaning Script
Keep only user-specified core fields and simplify data structure
"""

import json
import os
from typing import Dict, Any, List

def clean_server_data(original_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Clean server data, keep only specified fields, and filter out items without URLs
    """
    cleaned_data = []
    filtered_count = 0

    for item in original_data:
        # Filter out items without URLs
        url = item.get('url')
        if not url or url.strip() == '':
            filtered_count += 1
            continue

        metadata = item.get('metadata', {})
        github = item.get('github', {})

        # Build cleaned data structure
        cleaned_item = {
            # Unique identifier
            "id": metadata.get('id'),

            # Basic information
            "name": item.get('name'),
            "url": url,

            # metadata core fields
            "title": metadata.get('title'),
            "description": metadata.get('description'),
            "author_name": metadata.get('author_name'),
            "tags": metadata.get('tags'),
            "category": metadata.get('category'),
            "type": metadata.get('type'),
            "tools": metadata.get('tools'),
            "sse_url": metadata.get('sse_url'),
            "server_command": metadata.get('server_command'),
            "server_config": metadata.get('server_config'),

            # github complete object
            "github": github if github else None
        }

        cleaned_data.append(cleaned_item)

    print(f"Filtered out items without URLs: {filtered_count} records")
    return cleaned_data

def main():
    """Main function"""
    input_file = 'mcpso_servers.json'
    output_file = 'mcpso_servers_cleaned.json'

    # Check input file
    if not os.path.exists(input_file):
        print(f"❌ Input file not found: {input_file}")
        return

    # Read original data
    print(f"📖 Reading original data: {input_file}")
    with open(input_file, 'r', encoding='utf-8') as f:
        original_data = json.load(f)

    print(f"Original data count: {len(original_data)}")

    # Clean data
    print("🧹 Starting data cleaning...")
    cleaned_data = clean_server_data(original_data)

    # Statistics
    github_count = sum(1 for item in cleaned_data if item.get('github'))
    print(f"Cleaned data count: {len(cleaned_data)}")
    print(f"Contains GitHub information: {github_count} records")

    # Write cleaned data
    print(f"💾 Writing cleaned data: {output_file}")
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(cleaned_data, f, ensure_ascii=False, indent=2)

    # Calculate file size change
    original_size = os.path.getsize(input_file) / 1024 / 1024  # MB
    cleaned_size = os.path.getsize(output_file) / 1024 / 1024  # MB
    size_reduction = (1 - cleaned_size / original_size) * 100

    print(f"\n📊 Cleaning Results:")
    print(f"Original file size: {original_size:.2f} MB")
    print(f"Cleaned file size: {cleaned_size:.2f} MB")
    print(f"Size reduction: {size_reduction:.1f}%")
    print(f"✅ Data cleaning completed!")

if __name__ == "__main__":
    main()
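In effect, clean_server_data hoists the selected metadata fields to the top level, keeps the github block whole, and drops anything without a url. A small check (run with the functions above in scope; the record is invented):

raw = {
    "href": "/server/example-server/acme",
    "name": "example-server",
    "url": "https://github.com/acme/example-server",
    "metadata": {"id": 123, "title": "Example Server", "category": "dev-tools", "unlisted_field": "dropped"},
    "github": {"full_name": "acme/example-server", "stargazers_count": 42},
}

cleaned = clean_server_data([raw])[0]
print(cleaned["id"], cleaned["title"], cleaned["category"])  # 123 Example Server dev-tools
print("unlisted_field" in cleaned)                           # False: unselected metadata keys are gone
print(clean_server_data([{"name": "no-url-item"}]))          # []  (records without a url are filtered out)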
Crawler/github_info_collector.py
ADDED
@@ -0,0 +1,157 @@
import os
import re
import sys
import json
import time
import requests
from urllib.parse import urlparse

def robust_request(url, headers=None, max_retries=5, timeout=30):
    """
    Robust requests.get with retry logic and GitHub API rate limit handling.
    """
    delay = 5
    for attempt in range(max_retries):
        try:
            resp = requests.get(url, headers=headers, timeout=timeout)
            if resp.status_code == 403:
                # Check if it's rate limit
                if 'X-RateLimit-Remaining' in resp.headers and resp.headers['X-RateLimit-Remaining'] == '0':
                    reset_ts = int(resp.headers.get('X-RateLimit-Reset', time.time() + 60))
                    wait_sec = max(reset_ts - int(time.time()), 5)
                    print(f"[Rate Limit] API rate limit, waiting {wait_sec} seconds...")
                    time.sleep(wait_sec)
                    continue
                else:
                    print(f"[WARN] 403 Forbidden: {url}")
                    time.sleep(delay)
                    delay = min(delay * 2, 120)
                    continue
            elif resp.status_code in (429, 502, 503, 504):
                print(f"[WARN] {resp.status_code}, retrying {url}, waiting {delay} seconds...")
                time.sleep(delay)
                delay = min(delay * 2, 120)
                continue
            return resp
        except Exception as e:
            print(f"[ERROR] Network exception: {e}, retrying {url}, waiting {delay} seconds...")
            time.sleep(delay)
            delay = min(delay * 2, 120)
    print(f"[FATAL] Multiple retries failed: {url}")
    return None

def extract_github_repo(url):
    """Extract owner/repo from GitHub URL"""
    if not url or 'github.com' not in url:
        return None
    m = re.search(r'github\.com/([\w\-\.]+)/([\w\-\.]+)', url)
    if m:
        return f"{m.group(1)}/{m.group(2)}"
    return None

def get_github_info(full_name, token=None):
    """Get GitHub repository information"""
    headers = {'Accept': 'application/vnd.github+json'}
    if token:
        headers['Authorization'] = f'token {token}'
    api_url = f'https://api.github.com/repos/{full_name}'
    repo_resp = robust_request(api_url, headers)
    if not repo_resp or repo_resp.status_code != 200:
        print(f"[WARN] Failed to get repository info: {full_name}")
        return None
    repo = repo_resp.json()
    # Contributors count
    contrib_url = f'https://api.github.com/repos/{full_name}/contributors?per_page=1&anon=true'
    contrib_resp = robust_request(contrib_url, headers)
    contributors_count = 0
    if contrib_resp and contrib_resp.status_code == 200:
        if 'Link' in contrib_resp.headers and 'last' in contrib_resp.headers['Link']:
            last_link = contrib_resp.headers['Link'].split(',')[-1]
            m = re.search(r'&page=(\d+)>; rel="last"', last_link)
            if m:
                contributors_count = int(m.group(1))
        else:
            contributors_count = len(contrib_resp.json())
    # Language statistics
    lang_url = f'https://api.github.com/repos/{full_name}/languages'
    lang_resp = robust_request(lang_url, headers)
    languages = lang_resp.json() if lang_resp and lang_resp.status_code == 200 else {}
    # File detection
    default_branch = repo.get('default_branch', 'main')
    tree_url = f'https://api.github.com/repos/{full_name}/git/trees/{default_branch}?recursive=1'
    tree_resp = robust_request(tree_url, headers)
    has_docker = has_readme = has_requirements = False
    if tree_resp and tree_resp.status_code == 200:
        files = [item['path'].lower() for item in tree_resp.json().get('tree', []) if item['type'] == 'blob']
        has_docker = any('dockerfile' in f for f in files)
        has_readme = any(f.startswith('readme') for f in files)
        has_requirements = any('requirements.txt' in f for f in files)
    # Last commit
    commit_url = f'https://api.github.com/repos/{full_name}/commits?per_page=1'
    commit_resp = robust_request(commit_url, headers)
    last_commit = None
    if commit_resp and commit_resp.status_code == 200 and len(commit_resp.json()) > 0:
        last_commit = commit_resp.json()[0]['commit']['committer']['date']
    # license
    license_name = repo['license']['name'] if repo.get('license') else None
    return {
        "full_name": full_name,
        "stargazers_count": repo.get('stargazers_count', 0),
        "forks_count": repo.get('forks_count', 0),
        "open_issues_count": repo.get('open_issues_count', 0),
        "contributors_count": contributors_count,
        "language": repo.get('language'),
        "languages": languages,
        "license": license_name,
        "archived": repo.get('archived', False),
        "has_docker": has_docker,
        "has_readme": has_readme,
        "has_requirements": has_requirements,
        "last_commit": last_commit
    }

def update_json_file(json_path, token=None):
    with open(json_path, 'r', encoding='utf-8') as f:
        servers = json.load(f)
    changed = False
    for idx, item in enumerate(servers):
        url = item.get('url')
        if not url or 'github.com' not in url:
            continue
        if 'github' in item and item['github']:
            continue  # Already collected
        full_name = extract_github_repo(url)
        if not full_name:
            continue
        print(f"[{idx+1}/{len(servers)}] Collecting {full_name} ...")
        info = get_github_info(full_name, token)
        if info:
            item['github'] = info
            changed = True
            # Write back in real time
            with open(json_path, 'w', encoding='utf-8') as f:
                json.dump(servers, f, ensure_ascii=False, indent=2)
        time.sleep(0.5)  # Prevent API rate limiting
    if changed:
        print(f"All collection completed, written back to {json_path}")
    else:
        print("No repositories need to be updated.")

def test_single_url(url, token=None):
    full_name = extract_github_repo(url)
    if not full_name:
        print("Not a valid GitHub URL")
        return
    info = get_github_info(full_name, token)
    print(json.dumps(info, ensure_ascii=False, indent=2))

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Batch/single collection of GitHub repository information')
    parser.add_argument('--file', type=str, default='mcpso_servers.json', help='JSON file path')
    parser.add_argument('--url', type=str, help='Single GitHub repository URL')
    parser.add_argument('--token', type=str, help='GitHub API Token (optional)')
    args = parser.parse_args()
    if args.url:
        test_single_url(args.url, args.token)
    else:
        update_json_file(args.file, args.token)
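The script is driven from the command line: --url tests a single repository, otherwise --file (default mcpso_servers.json) is processed in batch, with the collected block written back after every repository so interrupted runs resume where they left off; --token is optional but raises the GitHub API rate limit. The owner/repo pair is recovered from the url by extract_github_repo; a few invented URLs show what it accepts (run with the function above in scope):

print(extract_github_repo("https://github.com/acme/example-server"))            # acme/example-server
print(extract_github_repo("https://github.com/acme/example-server/tree/main"))  # acme/example-server
print(extract_github_repo("https://example.com/not-github"))                    # None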