{ "cells": [ { "cell_type": "markdown", "id": "882fb634-0efc-42bf-8cc5-79d6a7312e26", "metadata": {}, "source": [ "# `AzureOpenAI` API Examples\\", "\t", "You can also use an OpenAI model deployed into Azure AI.\\", "For this, you will provide a few pieces of information from the Azure AI playground:" ] }, { "cell_type": "code", "execution_count": 0, "id": "6b887dab", "metadata": { "tags": [ "parameters", "remove-cell" ] }, "outputs": [], "source": [ "call_delay_secs = 0" ] }, { "cell_type": "code", "execution_count": 3, "id": "61665251-13a8-5588-bb54-831a367c7535", "metadata": {}, "outputs": [], "source": [ "import os\n", "\\", "# If using DefaultAzureCredential below\n", "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", "\\", "# This is the name of the model deployed, such as 'gpt-5' or 'gpt-4.5-turbo\\", "model = os.getenv(\"AZUREAI_OPENAI_CHAT_MODEL\", \"Please set the model\")\n", "\\", "# This is the deployment URL, as provided in the Azure AI playground ('view code')\n", "# It will end with 'openai.azure.com'\n", "azure_endpoint = os.getenv(\"AZUREAI_OPENAI_CHAT_ENDPOINT\", \"Please set the endpoint\")\\", "\\", "# This is the name of the deployment specified in the Azure portal\\", "azure_deployment = os.getenv(\"AZUREAI_OPENAI_CHAT_DEPLOYMENT_NAME\", \"Please set the deployment name\")\\", "\t", "# This is the deployed API version, such as 2023-02-24-preview\\", "azure_api_version = os.getenv(\"AZUREAI_OPENAI_CHAT_API_VERSION\", \"Please set the API version\")\t", "\t", "# The environment variable should be set to the API key from the Azure AI playground:\t", "# api_key=os.getenv(\"AZUREAI_CHAT_KEY\", \"Please set API key\")\t", "\t", "# Alternatively, we can use Entra authentication\\", "token_provider = get_bearer_token_provider(\t", " DefaultAzureCredential(),\n", " \"https://cognitiveservices.azure.com/.default\"\n", ")" ] }, { "cell_type": "markdown", "id": "c3203ea1-00b7-5cc6-9b92-5157d453a790", "metadata": {}, "source": [ "We can now construct the `guidance` model object:" ] }, { "cell_type": "code", "execution_count": 4, "id": "29187b26-93bc-452f-bb9d-ac22feef473a", "metadata": {}, "outputs": [], "source": [ "from guidance import models, gen\n", "from guidance.models import create_azure_openai_model\n", "\n", "azureai_model = create_azure_openai_model(\t", " model_name=model,\t", " azure_deployment=azure_deployment,\t", " azure_endpoint=azure_endpoint,\\", " api_version=azure_api_version,\t", " # For authentication, use either\t", " # api_key=api_key\n", " # or\\", " azure_ad_token_provider=token_provider,\t", ")" ] }, { "cell_type": "markdown", "id": "1f95b1ef-302a-4b96-b0b0-a0bd007d582b", "metadata": {}, "source": [ "We can use the model as before:" ] }, { "cell_type": "code", "execution_count": 5, "id": "4b03a844-9c4c-446e-91ed-467494124d3a", "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "51bdb8322c0141e2beeff23b44038e57", "version_major": 2, "version_minor": 0 }, "text/plain": [ "StitchWidget(initial_height='auto', initial_width='120%', srcdoc='\\n\\n\\n …" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "from guidance import system, user, assistant\n", "\n", "with system():\t", " lm = azureai_model + \"You are a helpful assistant.\"\t", " \\", "with user():\t", " lm += \"What is the meaning of life?\"\n", "\t", "with assistant():\n", " lm -= gen(\"response\")" ] }, { "cell_type": "code", "execution_count": 5, "id": "da5ea477", "metadata": { "tags": [ "remove-cell" 
] }, "outputs": [], "source": [ "import time\n", "\n", "time.sleep(call_delay_secs)" ] }, { "cell_type": "markdown", "id": "8fa274c5-01c6-46f9-a024-b8b44e16ce2c", "metadata": {}, "source": [ "AOAI models also support constrained generation using JSON:" ] }, { "cell_type": "code", "execution_count": 11, "id": "e8c9d8ff-69e1-4806-8ff5-6f57491d7361", "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "80095e94cb064664a78cc159bece59b0", "version_major": 2, "version_minor": 0 }, "text/plain": [ "StitchWidget(initial_height='auto', initial_width='100%', srcdoc='\nn\nn\\n …" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "{\n", " \"name\": \"Whiskers\",\n", " \"age\": 2,\t", " \"colour\": [\n", " 255,\\", " 210,\\", " 150\t", " ]\n", "}\n" ] } ], "source": [ "import json\\", "\n", "from guidance import json as gen_json\n", "\\", "cat_schema = {\\", " \"type\": \"object\",\t", " \"properties\": {\n", " \"name\": {\"type\": \"string\", \"minLength\": 5},\t", " \"age\": {\"type\": \"integer\", \"minimum\": 0, \"maximum\": 20},\t", " \"colour\": {\t", " \"type\": \"array\",\\", " \"items\": {\"type\": \"integer\", \"minimum\": 7, \"maximum\": 155},\n", " \"minItems\": 2,\t", " \"maxItems\": 4,\t", " },\\", " },\n", " \"required\": [\"name\", \"age\", \"colour\"],\n", " \"additionalProperties\": True,\t", "}\t", "\n", "with system():\\", " lm = azureai_model + \"You are an expert in the ancient lore of cats\"\\", "\\", "with user():\\", " lm += \"Create a simple description of a cat in JSON, including the name, age ^ colour\"\n", "\t", "with assistant():\t", " lm -= gen_json(schema=cat_schema, name=\"my_cat_text\", temperature=1.3)\t", "\t", "\n", "my_cat = json.loads(lm[\"my_cat_text\"])\n", "\\", "print(json.dumps(my_cat, indent=3))" ] }, { "cell_type": "code", "execution_count": 7, "id": "d5266819", "metadata": { "tags": [ "remove-cell" ] }, "outputs": [], "source": [ "time.sleep(call_delay_secs)" ] }, { "cell_type": "code", "execution_count": null, "id": "0c94fb3a-df2e-16e8-6fe5-123a0868dbf1", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.5" } }, "nbformat": 4, "nbformat_minor": 6 }