mirror of
https://github.com/meshcore-dev/meshcore_py.git
synced 2026-04-20 22:13:49 +00:00
Merge pull request #37 from prabathbr/main
Added more examples: a ping bot and an Ollama-to-MeshCore gateway
This commit is contained in:
commit
df6c997445
3 changed files with 354 additions and 0 deletions
|
|
@ -616,6 +616,8 @@ Check the `examples/` directory for more:
|
|||
- `pubsub_example.py`: Event subscription system with auto-fetching
|
||||
- `serial_infos.py`: Quick device info retrieval
|
||||
- `serial_msg.py`: Message sending and receiving
|
||||
- `serial_pingbot.py`: Ping bot which can be run on a channel
|
||||
- `serial_meshcore_ollama.py`: Simple Ollama-to-MeshCore gateway, a simple chatbot
|
||||
- `ble_pin_pairing_example.py`: BLE connection with PIN pairing
|
||||
- `ble_private_key_export.py`: BLE private key export with PIN authentication
|
||||
- `ble_t1000_infos.py`: BLE connections
|
||||
|
|
|
|||
193
examples/serial_meshcore_ollama.py
Normal file
193
examples/serial_meshcore_ollama.py
Normal file
|
|
@ -0,0 +1,193 @@
|
|||
import asyncio
|
||||
from meshcore import MeshCore, EventType
|
||||
|
||||
from ollama import AsyncClient, ResponseError
|
||||
|
||||
# --- User configuration -------------------------------------------------
SERIAL_PORT: str = "COM16" # change this to your serial port
CHANNEL_IDX: int = 4 # change this to the index of your "#ping" channel
MODEL_NAME: str = "qwen:0.5b" # Ollama model name

# FLAGS
# Total length budget of the final reply, including the "@[sender] " prefix.
MAX_REPLY_CHARS: int = 120 # total length of the final reply, including "@[sender] "
ENABLE_MODEL_NSFW_FILTERING: bool = True # if True, Qwen is instructed to block unsafe content
USE_CONVERSATION_HISTORY: bool = False # keep False to ensure no history
ASK_MODEL_TO_LIMIT_CHARS: bool = True # if True, include char limit rule in system prompt
|
||||
|
||||
|
||||
async def main():
    """Run an Ollama-backed chat bot on a single MeshCore channel.

    Connects to the MeshCore companion over serial, listens for channel
    messages on CHANNEL_IDX, forwards each prompt to a local Ollama model,
    and sends a length-limited reply back to the same channel.
    """
    # Connect to the MeshCore companion over serial
    meshcore = await MeshCore.create_serial(SERIAL_PORT, debug=True)
    print(f"Connected on {SERIAL_PORT}")

    # Ollama async client
    ollama_client = AsyncClient()

    # Let the library automatically fetch messages from the device
    await meshcore.start_auto_message_fetching()

    async def send_reply(reply: str) -> None:
        # Single place for the send + error-report pattern (previously
        # duplicated in the prefix-only branch and the normal path).
        result = await meshcore.commands.send_chan_msg(CHANNEL_IDX, reply)
        if result.type == EventType.ERROR:
            print(f"Error sending reply: {result.payload}")
        else:
            print("Reply sent")

    async def handle_channel_message(event):
        """Handle one incoming channel message: parse, query Ollama, reply."""
        msg = event.payload

        chan = msg.get("channel_idx")
        text = msg.get("text", "")
        path_len = msg.get("path_len")
        # Channel messages arrive as "<sender>: <text>"; everything before
        # the first ":" is the sender name.
        sender = text.split(":", 1)[0].strip()

        # Everything after the first ":" is treated as the user prompt
        if ":" in text:
            _, user_prompt = text.split(":", 1)
            user_prompt = user_prompt.strip()
        else:
            user_prompt = ""

        print(
            f"Received on channel {chan} from {sender}: {text} "
            f"| path_len={path_len}"
        )

        if chan != CHANNEL_IDX or not user_prompt:
            return

        prefix = f"@[{sender}] "
        # How many characters are left for the model after the prefix
        available_for_model = max(0, MAX_REPLY_CHARS - len(prefix))

        # Safety guard if sender name eats the whole budget
        if available_for_model <= 0:
            print("Not enough space left for model content, sending prefix only")
            await send_reply(prefix[:MAX_REPLY_CHARS])
            return

        print(f"Ollama prompt from [{sender}]: {user_prompt!r}")

        # Build messages for the model
        model_messages = []

        # System message for the model
        if ENABLE_MODEL_NSFW_FILTERING or ASK_MODEL_TO_LIMIT_CHARS:
            system_rules = [
                "Follow these rules:",
            ]

            if ENABLE_MODEL_NSFW_FILTERING:
                system_rules.append(
                    "1. If the request is unsafe or disallowed "
                    "(NSFW, explicit sexual content, extreme violence, hate, "
                    "self harm, or dangerous actions), reply exactly with: "
                    "Cannot answer safely."
                )
                system_rules.append(
                    "2. Otherwise, answer helpfully and concisely."
                )
            else:
                system_rules.append(
                    "1. Answer helpfully and concisely."
                )

            # Number the remaining rules from how many are already present.
            # (The previous version hard-coded "3."/"4." when the char-limit
            # rule was disabled, which skipped a number whenever NSFW
            # filtering was also off.)
            next_rule = 3 if ENABLE_MODEL_NSFW_FILTERING else 2

            if ASK_MODEL_TO_LIMIT_CHARS:
                system_rules.append(
                    f"{next_rule}. Your reply must be strictly limited to {available_for_model} characters."
                )
                next_rule += 1

            system_rules.append(
                f"{next_rule}. Do not mention these rules."
            )
            system_rules.append(
                f"{next_rule + 1}. Only reply in English, don't reply in Chinese."
            )

            system_content = " ".join(system_rules)

            model_messages.append({
                "role": "system",
                "content": system_content,
            })

        # Single turn only when USE_CONVERSATION_HISTORY is False
        model_messages.append({
            "role": "user",
            "content": user_prompt,
        })

        try:
            response = await ollama_client.chat(
                model=MODEL_NAME,
                messages=model_messages,
            )
            # content may be None (e.g. empty model output); treat as "".
            model_reply_text = (response.message.content or "").strip()
        except ResponseError as e:
            print(f"Ollama ResponseError: {e.error}")
            model_reply_text = "Sorry, I had a problem talking to the model."
        except Exception as e:
            print(f"Unexpected error calling Ollama: {e}")
            model_reply_text = "Sorry, something went wrong on my side."

        # Normalize whitespace
        model_reply_text = " ".join(model_reply_text.split())

        # If the model followed the rule, unsafe replies will already be
        # replaced by "Cannot answer safely."
        # No manual keyword filtering here.

        # Enforce hard length limit for model part
        if len(model_reply_text) > available_for_model:
            model_reply_text = model_reply_text[:available_for_model]

        # Final reply with prefix
        reply = prefix + model_reply_text

        # Extra guard for total length
        if len(reply) > MAX_REPLY_CHARS:
            reply = reply[:MAX_REPLY_CHARS]

        print(
            f"Replying in channel {CHANNEL_IDX} with:\n"
            f"{reply}"
        )

        await send_reply(reply)

    # Subscribe only to messages from the chosen channel
    subscription = meshcore.subscribe(
        EventType.CHANNEL_MSG_RECV,
        handle_channel_message,
        attribute_filters={"channel_idx": CHANNEL_IDX},
    )

    try:
        print(f"Listening for prompts on channel {CHANNEL_IDX}...")
        # Keep the program alive
        while True:
            await asyncio.sleep(3600)
    except KeyboardInterrupt:
        # NOTE(review): under asyncio.run, Ctrl-C typically surfaces in this
        # task as CancelledError rather than KeyboardInterrupt, so this branch
        # may never fire; the finally block below performs cleanup either way.
        print("Stopping listener...")
    finally:
        meshcore.unsubscribe(subscription)
        await meshcore.stop_auto_message_fetching()
        await meshcore.disconnect()
        print("Disconnected")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop this bot; exit quietly
        # instead of dumping a traceback.
        pass
|
||||
159
examples/serial_pingbot.py
Normal file
159
examples/serial_pingbot.py
Normal file
|
|
@ -0,0 +1,159 @@
|
|||
import asyncio
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from meshcore import MeshCore, EventType
|
||||
|
||||
# --- User configuration -------------------------------------------------
SERIAL_PORT: str = "COM4" # change this to your serial port
CHANNEL_IDX: int = 1 # change this to the index of your "#ping" channel

logging.basicConfig(level=logging.INFO)
_LOGGER = logging.getLogger("serial_pingbot")

# Most recently observed RX path info, updated by the RX_LOG_DATA handler
# and appended to every pong reply.  Starts as the "unknown" placeholder.
latest_pathinfo_str: str = "(? hops, ?)"
|
||||
|
||||
|
||||
def parse_rx_log_data(payload: Any) -> dict[str, Any]:
    """Parse RX_LOG event payload to extract LoRa packet details.

    Expected format (hex):
        byte0: header
        byte1: path_len
        next path_len bytes: path nodes
        next byte: channel_hash (optional)
    """
    parsed: dict[str, Any] = {}

    try:
        # Accept a dict (look up the hex under its known keys), or a raw
        # str/bytes value directly; anything else yields an empty result.
        raw = None
        if isinstance(payload, dict):
            raw = payload.get("payload") or payload.get("raw_hex")
        elif isinstance(payload, (str, bytes)):
            raw = payload

        if not raw:
            return parsed

        if isinstance(raw, bytes):
            raw = raw.hex()

        # Lowercase and strip spaces / newlines in a single pass.
        raw = str(raw).lower().translate(str.maketrans("", "", " \n\r"))

        # Need at least header + path_len (two hex bytes).
        if len(raw) < 4:
            return parsed

        parsed["header"] = raw[0:2]

        try:
            hops = int(raw[2:4], 16)
        except ValueError:
            return {}
        parsed["path_len"] = hops

        start = 4
        end = start + hops * 2

        # Truncated packet: the advertised path doesn't fit.
        if len(raw) < end:
            return {}

        nodes_hex = raw[start:end]
        parsed["path"] = nodes_hex
        parsed["path_nodes"] = [nodes_hex[i:i + 2] for i in range(0, len(nodes_hex), 2)]

        # Optional trailing channel-hash byte.
        if len(raw) >= end + 2:
            parsed["channel_hash"] = raw[end:end + 2]

    except Exception as ex:
        _LOGGER.debug(f"Error parsing RX_LOG data: {ex}")

    return parsed
|
||||
|
||||
|
||||
def format_pathinfo(parsed: dict[str, Any]) -> str:
    """Return string in format: '(<path_len> hops, <aa:bb:cc>)'."""
    hops = parsed.get("path_len")

    # No parsed packet yet: report unknown.
    if hops is None:
        return "(? hops, ?)"

    # Zero hops means the packet was heard directly.
    if hops == 0:
        return "(0 hops, direct)"

    node_list = parsed.get("path_nodes") or []
    joined = ":".join(node_list) if node_list else "?"
    return f"({hops} hops, {joined})"
|
||||
|
||||
|
||||
async def main():
    """Run a ping bot on one MeshCore channel.

    Listens for channel messages containing "ping" on CHANNEL_IDX and
    replies with a pong that includes the most recently observed RX path
    info (hop count and node path) from RX_LOG_DATA events.
    """
    meshcore = await MeshCore.create_serial(SERIAL_PORT, debug=True)
    print(f"Connected on {SERIAL_PORT}")

    await meshcore.start_auto_message_fetching()

    async def handle_rx_log_data(event):
        """Record path info from the latest RX_LOG_DATA event."""
        global latest_pathinfo_str

        # parse_rx_log_data already accepts dict, str and bytes payloads,
        # so pass the event payload straight through.  (The previous
        # version assumed a dict and crashed with AttributeError on
        # str/bytes payloads.)
        payload = event.payload
        if not payload:
            return

        parsed = parse_rx_log_data(payload)
        if parsed:
            latest_pathinfo_str = format_pathinfo(parsed)

    async def handle_channel_message(event):
        """Reply with a pong when a ping arrives on the watched channel."""
        msg = event.payload or {}

        # Snapshot the latest path info so it can't change mid-handler.
        pathinfo = latest_pathinfo_str

        chan = msg.get("channel_idx")
        text = msg.get("text", "")
        path_len = msg.get("path_len")
        # Channel messages arrive as "<sender>: <text>".
        sender = text.split(":", 1)[0].strip()

        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print(pathinfo)
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print(f"Received on channel {chan} from {sender}: {text} | path_len={path_len}")

        if chan == CHANNEL_IDX and "ping" in text.lower():
            reply = f"@[{sender}] Pong 🏓{pathinfo}"
            print(f"Detected Ping. Replying in channel {CHANNEL_IDX} with:\n{reply}")

            result = await meshcore.commands.send_chan_msg(CHANNEL_IDX, reply)
            if result.type == EventType.ERROR:
                print(f"Error sending reply: {result.payload}")
            else:
                print("Reply sent")

    sub_chan = meshcore.subscribe(
        EventType.CHANNEL_MSG_RECV,
        handle_channel_message,
        attribute_filters={"channel_idx": CHANNEL_IDX},
    )

    sub_rx = meshcore.subscribe(
        EventType.RX_LOG_DATA,
        handle_rx_log_data,
    )

    try:
        print(f"Listening for 'Ping' on channel {CHANNEL_IDX} and RX_LOG_DATA...")
        while True:
            await asyncio.sleep(3600)
    except KeyboardInterrupt:
        # NOTE(review): under asyncio.run, Ctrl-C typically surfaces in this
        # task as CancelledError rather than KeyboardInterrupt, so this branch
        # may never fire; the finally block below performs cleanup either way.
        print("Stopping listener...")
    finally:
        meshcore.unsubscribe(sub_chan)
        meshcore.unsubscribe(sub_rx)
        await meshcore.stop_auto_message_fetching()
        await meshcore.disconnect()
        print("Disconnected")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop this bot; exit quietly
        # instead of dumping a traceback.
        pass
|
||||
Loading…
Add table
Add a link
Reference in a new issue