@@ -8,6 +8,10 @@ const PROVIDERS = {
   anthropic: {
     keyLink: "https://console.anthropic.com/account/keys",
     generate: generateWithAnthropic
+  },
+  ollama: {
+    keyLink: "https://github.com/Azgaar/Fantasy-Map-Generator/wiki/Ollama-text-generation",
+    generate: generateWithOllama
   }
 };
 
@@ -18,11 +22,17 @@ const MODELS = {
   "chatgpt-4o-latest": "openai",
   "gpt-4o": "openai",
   "gpt-4-turbo": "openai",
-  "o1-preview": "openai",
-  "o1-mini": "openai",
+  o3: "openai",
+  "o3-mini": "openai",
+  "o3-pro": "openai",
+  "o4-mini": "openai",
+  "claude-opus-4-20250514": "anthropic",
+  "claude-sonnet-4-20250514": "anthropic",
   "claude-3-5-haiku-latest": "anthropic",
   "claude-3-5-sonnet-latest": "anthropic",
-  "claude-3-opus-latest": "anthropic"
+  "claude-3-opus-latest": "anthropic",
34+ "ollama (local models)" : "ollama"
2635} ;
2736
2837const SYSTEM_MESSAGE = "I'm working on my fantasy map." ;
@@ -76,10 +86,42 @@ async function generateWithAnthropic({key, model, prompt, temperature, onContent
   await handleStream(response, getContent);
 }
 
+async function generateWithOllama({key, model, prompt, temperature, onContent}) {
+  const ollamaModelName = key; // for Ollama, 'key' is the actual model name entered by the user
+
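+  // Ollama serves a local HTTP API with no API key; 11434 is its default port, so this
+  // hardcoded URL assumes a stock `ollama serve` running on the same machine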
+  const response = await fetch("http://localhost:11434/api/generate", {
+    method: "POST",
+    headers: {"Content-Type": "application/json"},
+    body: JSON.stringify({
+      model: ollamaModelName,
+      prompt,
+      system: SYSTEM_MESSAGE,
+      options: {temperature},
+      stream: true
+    })
+  });
+
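+  // /api/generate streams newline-delimited JSON objects, e.g.
+  // {"model": "llama3", "response": "Once", "done": false}; the generated text is in "response"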
+  const getContent = json => {
+    if (json.response) onContent(json.response);
+  };
+
+  await handleStream(response, getContent);
+}
+
 async function handleStream(response, getContent) {
   if (!response.ok) {
-    const json = await response.json();
-    throw new Error(json?.error?.message || "Failed to generate");
+    let errorMessage = `Failed to generate (${response.status} ${response.statusText})`;
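+    // OpenAI and Anthropic report errors as {"error": {"message": ...}} while Ollama
+    // uses {"error": "..."}; keep the bare status line if the body is not JSON at all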
+    try {
+      const json = await response.json();
+      errorMessage = json.error?.message || json.error || errorMessage;
+    } catch {}
+    throw new Error(errorMessage);
   }
 
   const reader = response.body.getReader();
@@ -95,13 +137,16 @@ async function handleStream(response, getContent) {
 
     for (let i = 0; i < lines.length - 1; i++) {
       const line = lines[i].trim();
-      if (line.startsWith("data: ") && line !== "data: [DONE]") {
-        try {
-          const json = JSON.parse(line.slice(6));
-          getContent(json);
-        } catch (jsonError) {
-          ERROR && console.error(`Failed to parse JSON:`, jsonError, `Line: ${line}`);
-        }
+      if (!line) continue;
+      if (line === "data: [DONE]") break;
+
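+      // OpenAI and Anthropic send SSE lines prefixed with "data: ", while Ollama sends
+      // bare JSON per line, hence the two parse paths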
+      try {
+        const parsed = line.startsWith("data: ") ? JSON.parse(line.slice(6)) : JSON.parse(line);
+        getContent(parsed);
+      } catch (error) {
+        ERROR && console.error("Failed to parse line:", line, error);
       }
     }
 