@@ -272,7 +272,7 @@
 "# Also adding max_tokens\n",
 "\n",
 "message = claude.messages.create(\n",
-"    model=\"claude-3-5-sonnet-20240620\",\n",
+"    model=\"claude-3-5-sonnet-latest\",\n",
 "    max_tokens=200,\n",
 "    temperature=0.7,\n",
 "    system=system_message,\n",
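For context, the full cell after this change reads roughly as follows. This is a sketch, not the notebook verbatim: the `claude` client and the `system_message` / `user_prompt` variables are set up in earlier cells, so placeholder values are assumed here.

```python
# Sketch of the updated cell, with assumed setup from earlier notebook cells.
import anthropic

claude = anthropic.Anthropic()  # assumes ANTHROPIC_API_KEY is set in the environment

system_message = "You are a helpful assistant"  # assumed placeholder
user_prompt = "Tell a light-hearted joke"       # assumed placeholder

message = claude.messages.create(
    model="claude-3-5-sonnet-latest",  # alias tracking the newest 3.5 Sonnet snapshot
    max_tokens=200,                    # upper bound on generated tokens
    temperature=0.7,                   # 0 = deterministic, higher = more varied
    system=system_message,
    messages=[{"role": "user", "content": user_prompt}],
)
print(message.content[0].text)
```

The point of the change: `claude-3-5-sonnet-latest` is an alias that tracks the current snapshot, so the notebook no longer pins the dated `20240620` release.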
@@ -295,7 +295,7 @@
 "# Now let's add in streaming back results\n",
 "\n",
 "result = claude.messages.stream(\n",
-"    model=\"claude-3-5-sonnet-20240620\",\n",
+"    model=\"claude-3-5-sonnet-latest\",\n",
 "    max_tokens=200,\n",
 "    temperature=0.7,\n",
 "    system=system_message,\n",
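Assembled the same way, the streaming cell would look roughly like this. `messages.stream()` returns a context manager rather than a finished message; the assumptions about `claude`, `system_message`, and `user_prompt` from the previous sketch carry over.

```python
# Sketch of the streaming cell; reuses claude, system_message, user_prompt from above.
result = claude.messages.stream(
    model="claude-3-5-sonnet-latest",
    max_tokens=200,
    temperature=0.7,
    system=system_message,
    messages=[{"role": "user", "content": user_prompt}],
)

# Entering the context manager opens the stream; text_stream yields text deltas
# as they arrive, so output appears incrementally.
with result as stream:
    for text in stream.text_stream:
        print(text, end="", flush=True)
```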
@@ -321,7 +321,7 @@
 "# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n",
 "\n",
 "gemini = google.generativeai.GenerativeModel(\n",
-"    model_name='gemini-1.5-flash',\n",
+"    model_name='gemini-2.0-flash-exp',\n",
 "    system_instruction=system_message\n",
 ")\n",
 "response = gemini.generate_content(user_prompt)\n",
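For reference, the surrounding Gemini cell in full; this is a sketch that assumes `google.generativeai` was imported and configured with an API key in an earlier cell, and that `system_message` and `user_prompt` are defined as above.

```python
# Sketch of the Gemini cell; assumes google.generativeai.configure(api_key=...)
# ran in an earlier setup cell.
import google.generativeai

gemini = google.generativeai.GenerativeModel(
    model_name="gemini-2.0-flash-exp",
    system_instruction=system_message,  # same assumed system prompt as before
)
response = gemini.generate_content(user_prompt)
print(response.text)
```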
@@ -344,7 +344,7 @@
 ")\n",
 "\n",
 "response = gemini_via_openai_client.chat.completions.create(\n",
-"    model=\"gemini-1.5-flash\",\n",
+"    model=\"gemini-2.0-flash-exp\",\n",
 "    messages=prompts\n",
 ")\n",
 "print(response.choices[0].message.content)"
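And the alternative cell, which calls Gemini through its OpenAI-compatible endpoint. A sketch under stated assumptions: the `base_url`, the `GOOGLE_API_KEY` environment variable name, and the shape of the `prompts` list are inferred from how this pattern is normally used, not taken from the notebook.

```python
# Sketch of the alternative cell: Gemini via the OpenAI-compatible endpoint.
import os
from openai import OpenAI

gemini_via_openai_client = OpenAI(
    api_key=os.environ["GOOGLE_API_KEY"],  # assumed env var name
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",  # assumed endpoint
)

# Standard OpenAI-style message list, reusing the assumed prompts from above.
prompts = [
    {"role": "system", "content": system_message},
    {"role": "user", "content": user_prompt},
]

response = gemini_via_openai_client.chat.completions.create(
    model="gemini-2.0-flash-exp",
    messages=prompts,
)
print(response.choices[0].message.content)
```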