-
Notifications
You must be signed in to change notification settings - Fork 307
Expand file tree
/
Copy pathProgram.cs
More file actions
94 lines (75 loc) · 2.79 KB
/
Program.cs
File metadata and controls
94 lines (75 loc) · 2.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
using Microsoft.AI.Foundry.Local;
using OpenAI;
using System.ClientModel;
// Sample: start a local Foundry model service, then query it through the OpenAI SDK.
var config = new Configuration
{
    AppName = "foundry_local_samples",
    LogLevel = Microsoft.AI.Foundry.Local.LogLevel.Information,
    Web = new Configuration.WebService
    {
        // Fixed local port so the OpenAI client below knows where to connect.
        Urls = "http://127.0.0.1:52495"
    }
};

// Initialize the singleton instance.
await FoundryLocalManager.CreateAsync(config, Utils.GetAppLogger());
var mgr = FoundryLocalManager.Instance;

// Ensure that any Execution Provider (EP) downloads run and are completed.
// EP packages include dependencies and may be large.
// Download is only required again if a new version of the EP is released.
// For cross platform builds there is no dynamic EP download and this will return immediately.
await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync());

// Get the model catalog and resolve the model by alias.
var catalog = await mgr.GetCatalogAsync();
// Specific exception type rather than bare Exception (CA2201).
var model = await catalog.GetModelAsync("qwen2.5-0.5b")
    ?? throw new InvalidOperationException("Model not found");

// Check cache before downloading — skip download if model is already cached.
if (!await model.IsCachedAsync())
{
    Console.WriteLine($"Model \"{model.Id}\" not found in cache. Downloading...");
    await model.DownloadAsync(progress =>
    {
        // Render a 30-cell text progress bar; progress is a percentage (0-100).
        var filled = (int)Math.Round(progress / 100.0 * 30);
        var bar = new string('\u2588', filled) + new string('\u2591', 30 - filled);
        Console.Write($"\rDownloading: [{bar}] {progress:F1}%");
        if (progress >= 100f)
        {
            // Finish the \r-overwritten progress line once complete.
            Console.WriteLine();
        }
    });
    Console.WriteLine("\u2713 Model downloaded");
}
else
{
    Console.WriteLine($"\u2713 Model \"{model.Id}\" already cached \u2014 skipping download");
}

// Load the model into memory.
Console.Write($"Loading model {model.Id}...");
await model.LoadAsync();
Console.WriteLine("done. \u2713 Model ready");

try
{
    // Start the web service.
    Console.Write($"Starting web service on {config.Web.Urls}...");
    await mgr.StartWebServiceAsync();
    Console.WriteLine("done.");

    try
    {
        // <<<<<< OPEN AI SDK USAGE >>>>>>
        // Use the OpenAI SDK to call the local Foundry web service.
        // "notneeded": the SDK requires a key even though the local service
        // presumably ignores it — TODO confirm against the Foundry docs.
        ApiKeyCredential key = new ApiKeyCredential("notneeded");
        OpenAIClient client = new OpenAIClient(key, new OpenAIClientOptions
        {
            Endpoint = new Uri(config.Web.Urls + "/v1"),
        });
        var chatClient = client.GetChatClient(model.Id);

        // Stream completion tokens to the console as they arrive.
        var completionUpdates = chatClient.CompleteChatStreaming("Why is the sky blue?");
        Console.Write("[ASSISTANT]: ");
        foreach (var completionUpdate in completionUpdates)
        {
            if (completionUpdate.ContentUpdate.Count > 0)
            {
                Console.Write(completionUpdate.ContentUpdate[0].Text);
            }
        }
        Console.WriteLine();
        // <<<<<< END OPEN AI SDK USAGE >>>>>>
    }
    finally
    {
        // Always stop the web service, even if the chat call throws.
        await mgr.StopWebServiceAsync();
    }
}
finally
{
    // Always unload the model so it does not stay resident after a failure.
    await model.UnloadAsync();
}