mirror of
https://github.com/oobabooga/text-generation-webui.git
synced 2026-03-18 03:14:39 +01:00
Add num_pages and max_tokens kwargs to web search tools
This commit is contained in:
parent
a09f21b9de
commit
4c7a56c18d
|
|
@ -9,6 +9,7 @@ tool = {
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"url": {"type": "string", "description": "The URL of the web page to fetch."},
|
||||
"max_tokens": {"type": "integer", "description": "Maximum number of tokens in the returned content (default: 2048)."},
|
||||
},
|
||||
"required": ["url"]
|
||||
}
|
||||
|
|
@ -18,6 +19,7 @@ tool = {
|
|||
|
||||
def execute(arguments):
|
||||
url = arguments.get("url", "")
|
||||
max_tokens = arguments.get("max_tokens", 2048)
|
||||
if not url:
|
||||
return {"error": "No URL provided."}
|
||||
|
||||
|
|
@ -25,4 +27,4 @@ def execute(arguments):
|
|||
if not content or not content.strip():
|
||||
return {"error": f"Failed to fetch content from {url}"}
|
||||
|
||||
return {"url": url, "content": truncate_content_by_tokens(content)}
|
||||
return {"url": url, "content": truncate_content_by_tokens(content, max_tokens=max_tokens)}
|
||||
|
|
|
|||
|
|
@ -9,6 +9,8 @@ tool = {
|
|||
"type": "object",
|
||||
"properties": {
|
||||
"query": {"type": "string", "description": "The search query."},
|
||||
"num_pages": {"type": "integer", "description": "Number of search result pages to fetch (default: 3)."},
|
||||
"max_tokens": {"type": "integer", "description": "Maximum number of tokens per page result (default: 2048)."},
|
||||
},
|
||||
"required": ["query"]
|
||||
}
|
||||
|
|
@ -18,10 +20,12 @@ tool = {
|
|||
|
||||
def execute(arguments):
    """Run a web search and return the content of each result, truncated by tokens.

    Args:
        arguments: dict of tool-call arguments. Recognized keys:
            query (str): the search query (required; defaults to "" if absent).
            num_pages (int): number of search result pages to fetch (default: 3).
            max_tokens (int): maximum tokens kept per result's content (default: 2048).

    Returns:
        A list of {"title", "url", "content"} dicts for every result with
        non-empty content, or [{"error": "No results found."}] when none qualify.
    """
    query = arguments.get("query", "")
    # New optional kwargs added by this commit; defaults preserve prior behavior
    # (previously num_pages=3 was hard-coded and no token cap was passed).
    num_pages = arguments.get("num_pages", 3)
    max_tokens = arguments.get("max_tokens", 2048)

    results = perform_web_search(query, num_pages=num_pages)

    output = []
    for r in results:
        # Skip empty results and results whose content is whitespace-only.
        if r and r["content"].strip():
            output.append({
                "title": r["title"],
                "url": r["url"],
                "content": truncate_content_by_tokens(r["content"], max_tokens=max_tokens),
            })

    return output if output else [{"error": "No results found."}]
|
||||
|
|
|
|||
Loading…
Reference in a new issue