@@ -28,14 +28,46 @@ import asyncio
2828from copilot import CopilotClient
2929
3030async def main ():
31- # Create and start client
31+ # Client automatically starts on enter and cleans up on exit
32+ async with CopilotClient() as client:
33+ # Create a session with automatic cleanup
34+ async with await client.create_session({" model" : " gpt-4o" }) as session:
35+ # Wait for response using session.idle event
36+ done = asyncio.Event()
37+
38+ def on_event (event ):
39+ if event.type.value == " assistant.message" :
40+ print (event.data.content)
41+ elif event.type.value == " session.idle" :
42+ done.set()
43+
44+ session.on(on_event)
45+
46+ # Send a message and wait for completion
47+ await session.send(" What is 2+2?" )
48+ await done.wait()
49+
50+ # Session automatically disconnected here
51+
52+ # Client automatically stopped here
53+
54+ asyncio.run(main())
55+ ```
56+
57+ ### Manual Resource Management
58+
59+ If you need more control over the lifecycle, you can call `start()`, `stop()`, and `disconnect()` manually:
60+
61+ ``` python
62+ import asyncio
63+ from copilot import CopilotClient
64+
65+ async def main ():
3266 client = CopilotClient()
3367 await client.start()
3468
35- # Create a session
36- session = await client.create_session({" model" : " gpt-5" })
69+ session = await client.create_session({" model" : " gpt-4o" })
3770
38- # Wait for response using session.idle event
3971 done = asyncio.Event()
4072
4173 def on_event (event ):
@@ -45,34 +77,25 @@ async def main():
4577 done.set()
4678
4779 session.on(on_event)
48-
49- # Send a message and wait for completion
5080 await session.send(" What is 2+2?" )
5181 await done.wait()
5282
53- # Clean up
83+ # Clean up manually
5484 await session.disconnect()
5585 await client.stop()
5686
5787asyncio.run(main())
5888```
5989
60- Sessions also support the ` async with ` context manager pattern for automatic cleanup:
61-
62- ``` python
63- async with await client.create_session({" model" : " gpt-5" }) as session:
64- await session.send(" What is 2+2?" )
65- # session is automatically disconnected when leaving the block
66- ```
67-
6890## Features
6991
70- - ✅ Full JSON-RPC protocol support
71- - ✅ stdio and TCP transports
72- - ✅ Real-time streaming events
73- - ✅ Session history with ` get_messages() `
74- - ✅ Type hints throughout
75- - ✅ Async/await native
92+ - Full JSON-RPC protocol support
93+ - stdio and TCP transports
94+ - Real-time streaming events
95+ - Session history with `get_messages()`
96+ - Type hints throughout
97+ - Async/await native
98+ - Async context manager support for automatic resource cleanup
7699
77100## API Reference
78101
@@ -81,24 +104,19 @@ async with await client.create_session({"model": "gpt-5"}) as session:
81104``` python
82105from copilot import CopilotClient, SubprocessConfig
83106
84- # Spawn a local CLI process (default)
85- client = CopilotClient() # uses bundled CLI, stdio transport
86- await client.start()
107+ async with CopilotClient() as client:
108+ async with await client.create_session({" model" : " gpt-4o" }) as session:
109+ def on_event (event ):
110+ print (f " Event: { event[' type' ]} " )
87111
88- session = await client.create_session({" model" : " gpt-5" })
112+ session.on(on_event)
113+ await session.send(" Hello!" )
89114
90- def on_event (event ):
91- print (f " Event: { event[' type' ]} " )
92-
93- session.on(on_event)
94- await session.send(" Hello!" )
95-
96- # ... wait for events ...
97-
98- await session.disconnect()
99- await client.stop()
115+ # ... wait for events ...
100116```
101117
118+ > **Note:** For manual lifecycle management, see [Manual Resource Management](#manual-resource-management) above.
119+
102120``` python
103121from copilot import CopilotClient, ExternalServerConfig
104122
@@ -136,7 +154,7 @@ CopilotClient(
136154
137155**SessionConfig Options (for `create_session`):**
138156
139- - `model` (str): Model to use ("gpt-5", "claude-sonnet-4.5", etc.). **Required when using custom provider.**
157+ - `model` (str): Model to use ("gpt-4o", "claude-sonnet-4.5", etc.). **Required when using custom provider.**
140158- `reasoning_effort` (str): Reasoning effort level for models that support it ("low", "medium", "high", "xhigh"). Use `list_models()` to check which models support this option.
141159- `session_id` (str): Custom session ID
142160- `tools` (list): Custom tools exposed to the CLI
@@ -192,10 +210,11 @@ async def lookup_issue(params: LookupIssueParams) -> str:
192210 issue = await fetch_issue(params.id)
193211 return issue.summary
194212
195- session = await client.create_session({
196- " model" : " gpt-5 " ,
213+ async with await client.create_session({
214+ " model" : " gpt-4o " ,
197215 " tools" : [lookup_issue],
198- })
216+ }) as session:
217+ ...
199218```
200219
201220> **Note:** When using `from __future__ import annotations`, define Pydantic models at module level (not inside functions).
@@ -216,8 +235,8 @@ async def lookup_issue(invocation):
216235 " sessionLog" : f " Fetched issue { issue_id} " ,
217236 }
218237
219- session = await client.create_session({
220- " model" : " gpt-5 " ,
238+ async with await client.create_session({
239+ " model" : " gpt-4o " ,
221240 " tools" : [
222241 Tool(
223242 name = " lookup_issue" ,
@@ -232,7 +251,8 @@ session = await client.create_session({
232251 handler = lookup_issue,
233252 )
234253 ],
235- })
254+ }) as session:
255+ ...
236256```
237257
238258The SDK automatically handles `tool.call`, executes your handler (sync or async), and responds with the final result when the tool completes.
@@ -292,44 +312,38 @@ import asyncio
292312from copilot import CopilotClient
293313
294314async def main ():
295- client = CopilotClient()
296- await client.start()
297-
298- session = await client.create_session({
299- " model" : " gpt-5" ,
300- " streaming" : True
301- })
302-
303- # Use asyncio.Event to wait for completion
304- done = asyncio.Event()
305-
306- def on_event (event ):
307- if event.type.value == " assistant.message_delta" :
308- # Streaming message chunk - print incrementally
309- delta = event.data.delta_content or " "
310- print (delta, end = " " , flush = True )
311- elif event.type.value == " assistant.reasoning_delta" :
312- # Streaming reasoning chunk (if model supports reasoning)
313- delta = event.data.delta_content or " "
314- print (delta, end = " " , flush = True )
315- elif event.type.value == " assistant.message" :
316- # Final message - complete content
317- print (" \n --- Final message ---" )
318- print (event.data.content)
319- elif event.type.value == " assistant.reasoning" :
320- # Final reasoning content (if model supports reasoning)
321- print (" --- Reasoning ---" )
322- print (event.data.content)
323- elif event.type.value == " session.idle" :
324- # Session finished processing
325- done.set()
326-
327- session.on(on_event)
328- await session.send(" Tell me a short story" )
329- await done.wait() # Wait for streaming to complete
330-
331- await session.disconnect()
332- await client.stop()
315+ async with CopilotClient() as client:
316+ async with await client.create_session({
317+ " model" : " gpt-4o" ,
318+ " streaming" : True ,
319+ }) as session:
320+ # Use asyncio.Event to wait for completion
321+ done = asyncio.Event()
322+
323+ def on_event (event ):
324+ if event.type.value == " assistant.message_delta" :
325+ # Streaming message chunk - print incrementally
326+ delta = event.data.delta_content or " "
327+ print (delta, end = " " , flush = True )
328+ elif event.type.value == " assistant.reasoning_delta" :
329+ # Streaming reasoning chunk (if model supports reasoning)
330+ delta = event.data.delta_content or " "
331+ print (delta, end = " " , flush = True )
332+ elif event.type.value == " assistant.message" :
333+ # Final message - complete content
334+ print (" \n --- Final message ---" )
335+ print (event.data.content)
336+ elif event.type.value == " assistant.reasoning" :
337+ # Final reasoning content (if model supports reasoning)
338+ print (" --- Reasoning ---" )
339+ print (event.data.content)
340+ elif event.type.value == " session.idle" :
341+ # Session finished processing
342+ done.set()
343+
344+ session.on(on_event)
345+ await session.send(" Tell me a short story" )
346+ await done.wait() # Wait for streaming to complete
333347
334348asyncio.run(main())
335349```
@@ -349,27 +363,28 @@ By default, sessions use **infinite sessions** which automatically manage contex
349363
350364``` python
351365# Default: infinite sessions enabled with default thresholds
352- session = await client.create_session({" model" : " gpt-5" })
353-
354- # Access the workspace path for checkpoints and files
355- print (session.workspace_path)
356- # => ~/.copilot/session-state/{session_id}/
366+ async with await client.create_session({" model" : " gpt-4o" }) as session:
367+ # Access the workspace path for checkpoints and files
368+ print (session.workspace_path)
369+ # => ~/.copilot/session-state/{session_id}/
357370
358371# Custom thresholds
359- session = await client.create_session({
360- " model" : " gpt-5 " ,
372+ async with await client.create_session({
373+ " model" : " gpt-4o " ,
361374 " infinite_sessions" : {
362375 " enabled" : True ,
363376 " background_compaction_threshold" : 0.80 , # Start compacting at 80% context usage
364377 " buffer_exhaustion_threshold" : 0.95 , # Block at 95% until compaction completes
365378 },
366- })
379+ }) as session:
380+ ...
367381
368382# Disable infinite sessions
369- session = await client.create_session({
370- " model" : " gpt-5 " ,
383+ async with await client.create_session({
384+ " model" : " gpt-4o " ,
371385 " infinite_sessions" : {" enabled" : False },
372- })
386+ }) as session:
387+ ...
373388```
374389
375390When enabled, sessions emit compaction events:
@@ -393,39 +408,39 @@ The SDK supports custom OpenAI-compatible API providers (BYOK - Bring Your Own K
393408** Example with Ollama:**
394409
395410``` python
396- session = await client.create_session({
411+ async with await client.create_session({
397412 " model" : " deepseek-coder-v2:16b" , # Required when using custom provider
398413 " provider" : {
399414 " type" : " openai" ,
400415 " base_url" : " http://localhost:11434/v1" , # Ollama endpoint
401416 # api_key not required for Ollama
402417 },
403- })
404-
405- await session.send(" Hello!" )
418+ }) as session:
419+ await session.send(" Hello!" )
406420```
407421
408422** Example with custom OpenAI-compatible API:**
409423
410424``` python
411425import os
412426
413- session = await client.create_session({
427+ async with await client.create_session({
414428 " model" : " gpt-4" ,
415429 " provider" : {
416430 " type" : " openai" ,
417431 " base_url" : " https://my-api.example.com/v1" ,
418432 " api_key" : os.environ[" MY_API_KEY" ],
419433 },
420- })
434+ }) as session:
435+ ...
421436```
422437
423438** Example with Azure OpenAI:**
424439
425440``` python
426441import os
427442
428- session = await client.create_session({
443+ async with await client.create_session({
429444 " model" : " gpt-4" ,
430445 " provider" : {
431446 " type" : " azure" , # Must be "azure" for Azure endpoints, NOT "openai"
@@ -435,7 +450,8 @@ session = await client.create_session({
435450 " api_version" : " 2024-10-21" ,
436451 },
437452 },
438- })
453+ }) as session:
454+ ...
439455```
440456
441457> **Important notes:**
@@ -489,10 +505,11 @@ async def handle_user_input(request, invocation):
489505 " wasFreeform" : True , # Whether the answer was freeform (not from choices)
490506 }
491507
492- session = await client.create_session({
493- " model" : " gpt-5 " ,
508+ async with await client.create_session({
509+ " model" : " gpt-4o " ,
494510 " on_user_input_request" : handle_user_input,
495- })
511+ }) as session:
512+ ...
496513```
497514
498515## Session Hooks
@@ -536,8 +553,8 @@ async def on_error_occurred(input, invocation):
536553 " errorHandling" : " retry" , # "retry", "skip", or "abort"
537554 }
538555
539- session = await client.create_session({
540- " model" : " gpt-5 " ,
556+ async with await client.create_session({
557+ " model" : " gpt-4o " ,
541558 " hooks" : {
542559 " on_pre_tool_use" : on_pre_tool_use,
543560 " on_post_tool_use" : on_post_tool_use,
@@ -546,7 +563,8 @@ session = await client.create_session({
546563 " on_session_end" : on_session_end,
547564 " on_error_occurred" : on_error_occurred,
548565 },
549- })
566+ }) as session:
567+ ...
550568```
551569
552570**Available hooks:**
0 commit comments