gokaygokay committed
Commit ac150a8
1 Parent(s): 8433342

prompt_types

huggingface_inference_node.py CHANGED
@@ -62,9 +62,10 @@ You are allowed to make up film and branding names, and do them like 80's, 90's
         "fantasy": fantasy_prompt
     }
 
-    # Update this part to handle the prompt_type correctly
-    if prompt_type and prompt_type in prompt_types:
-        base_prompt = prompt_types[prompt_type]
+    print(f"Received prompt_type: '{prompt_type}'")  # Debug print
+
+    if prompt_type and prompt_type.strip() in prompt_types:
+        base_prompt = prompt_types[prompt_type.strip()]
         print(f"Using {prompt_type} prompt")
     elif custom_base_prompt.strip():
         base_prompt = custom_base_prompt
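
For context, a minimal self-contained sketch of the selection logic this hunk patches. Only the "fantasy" key, the .strip() normalization, and the two print calls come from the diff itself; the function name select_base_prompt, the other dictionary keys, the prompt strings, and the default fallback are placeholders:

# Sketch of the base-prompt dispatch; placeholder strings throughout.
def select_base_prompt(prompt_type, custom_base_prompt=""):
    default_prompt = "<default prompt>"  # placeholder
    prompt_types = {
        "happy": "<happy prompt>",      # placeholder key/value
        "poster": "<poster prompt>",    # placeholder key/value
        "fantasy": "<fantasy prompt>",  # key from the diff; value is a placeholder
    }

    base_prompt = default_prompt
    print(f"Received prompt_type: '{prompt_type}'")  # Debug print

    # Strip the incoming value before the lookup so stray whitespace
    # from the UI does not break the exact dictionary match.
    if prompt_type and prompt_type.strip() in prompt_types:
        base_prompt = prompt_types[prompt_type.strip()]
        print(f"Using {prompt_type} prompt")
    elif custom_base_prompt.strip():
        base_prompt = custom_base_prompt
    return base_prompt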
ui_components.py CHANGED
@@ -170,9 +170,13 @@ def create_interface():
     generate_text_button.click(
         generate_text_with_llm,
         inputs=[output, happy_talk, compress, compression_level, prompt_type, custom_base_prompt],
-        outputs=text_output
+        outputs=text_output,
+        api_name="generate_text"  # Add this line
     )
 
+    # Add this line to disable caching for the generate_text_with_llm function
+    generate_text_with_llm.cache_examples = False
+
     def update_all_options(choice):
         updates = {}
         if choice == "Disabled":
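
Setting api_name exposes the click handler as a named endpoint on the running app's API. A hypothetical client-side call could then look like the sketch below; the positional order follows the inputs list in the hunk above, but the Space ID and every argument value are placeholders:

# Hypothetical call against the endpoint created by api_name="generate_text".
from gradio_client import Client

client = Client("user/space-name")  # placeholder Space ID
result = client.predict(
    "a castle on a hill at dusk",  # output: text to run through the LLM
    False,                         # happy_talk
    False,                         # compress
    "medium",                      # compression_level (placeholder value)
    "fantasy",                     # prompt_type
    "",                            # custom_base_prompt
    api_name="/generate_text",
)
print(result)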