Shreyas094 committed
Commit d03b227
1 Parent(s): f3e5661

Update app.py

Files changed (1): app.py +58 -3
app.py CHANGED
@@ -19,7 +19,7 @@ import logging
 import shutil
 import pandas as pd
 from docx import Document as DocxDocument
-
+import google.generativeai as genai
 
 # Set up basic configuration for logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
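
This commit wires Google's Gemini into the app: the SDK import above, a "gemini-1.5-flash" entry in MODELS, a dedicated get_response_from_gemini handler, and routing inside respond() for Excel and PDF/Word contexts. The import is the Gemini Python SDK, distributed as the pip package google-generativeai, and the new handler reads its key from the GEMINI_API_KEY environment variable, so that secret must be set for the Space. A minimal standalone sketch of the setup the commit relies on (the smoke-test prompt is invented for illustration):

    import os
    import google.generativeai as genai

    # The SDK authenticates with an API key; app.py expects it in the environment.
    genai.configure(api_key=os.environ["GEMINI_API_KEY"])

    # Same model id the new handler uses.
    model = genai.GenerativeModel(model_name="gemini-1.5-flash")
    print(model.generate_content("Reply with OK").text)  # hypothetical smoke test
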
@@ -40,6 +40,7 @@ MODELS = [
     "@cf/meta/llama-3.1-8b-instruct",
     "mistralai/Mistral-Nemo-Instruct-2407",
     "mistralai/Mathstral-7B-v0.1",
+    "gemini-1.5-flash",
     "duckduckgo/gpt-4o-mini",
     "duckduckgo/claude-3-haiku",
     "duckduckgo/llama-3.1-70b",
@@ -352,6 +353,53 @@ def summarize_web_results(query: str, search_results: List[Dict[str, str]], conv
     except Exception as e:
         return f"An error occurred during summarization: {str(e)}"
 
+
+def get_response_from_gemini(query, context, file_type, num_calls=1, temperature=0.2):
+    # Configure the Gemini API
+    genai.configure(api_key=os.environ["GEMINI_API_KEY"])
+
+    # Define the model
+    model = genai.GenerativeModel(
+        model_name="gemini-1.5-flash",
+        generation_config={
+            "temperature": temperature,
+            "top_p": 1,
+            "top_k": 1,
+            "max_output_tokens": 2048,
+        },
+    )
+
+    # Create the system instruction based on file type
+    if file_type == "excel":
+        system_instruction = """You are a highly specialized data analyst with expertise in Excel spreadsheets.
+        Your task is to analyze the provided Excel data and answer the user's query accurately and concisely.
+        Focus on identifying key metrics, trends, and significant details relevant to the query.
+        Do not make assumptions or include information not explicitly supported by the dataset."""
+    elif file_type == "pdf":
+        system_instruction = """You are a highly specialized document analyst with expertise in extracting information from PDF documents.
+        Your task is to analyze the provided PDF content and answer the user's query accurately and comprehensively.
+        Focus on key points, important details, and relevant information from the document.
+        Ensure your response is strictly based on the provided context."""
+    else:
+        raise ValueError("Invalid file type. Use 'excel' or 'pdf'.")
+
+    # Prepare the chat session
+    chat_session = model.start_chat(history=[])
+
+    full_response = ""
+    for _ in range(num_calls):
+        try:
+            # Send the message with context and query
+            response = chat_session.send_message(
+                f"{system_instruction}\n\nContext:\n{context}\n\nUser query: {query}"
+            )
+            full_response += response.text + "\n"
+        except Exception as e:
+            print(f"Error in generating response from Gemini: {str(e)}")
+            return f"An error occurred with the Gemini model: {str(e)}. Please try again."
+
+    return full_response.strip()
+
 def get_response_from_excel(query, model, context, num_calls=3, temperature=0.2):
     logging.info(f"Getting response from Excel using model: {model}")
 
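Unlike the generator-based handlers around it, get_response_from_gemini runs num_calls sequential requests, concatenates the response texts, and returns one finished string (or an error message, since exceptions are caught and returned rather than raised). A usage sketch under that signature; the query and context values here are invented for illustration:

    context = "Sheet1: quarter, revenue\nQ1, 120\nQ2, 95"  # hypothetical text extracted from a workbook
    answer = get_response_from_gemini(
        query="Which quarter had higher revenue?",
        context=context,
        file_type="excel",  # "pdf" is the only other accepted value; anything else raises ValueError
    )
    print(answer)
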
@@ -460,12 +508,19 @@ def respond(message, history, model, temperature, num_calls, use_web_search, sel
 
     # Process Excel documents
     if excel_docs:
-        for response in get_response_from_excel(message, model, excel_context, num_calls, temperature):
+        if model == "gemini-1.5-flash":
+            response = get_response_from_gemini(message, excel_context, "excel", num_calls, temperature)
             yield response
+        else:
+            for response in get_response_from_excel(message, model, excel_context, num_calls, temperature):
+                yield response
 
     # Process other documents (PDF, Word)
     if other_docs:
-        if model == "@cf/meta/llama-3.1-8b-instruct":
+        if model == "gemini-1.5-flash":
+            response = get_response_from_gemini(message, other_context, "pdf", num_calls, temperature)
+            yield response
+        elif model == "@cf/meta/llama-3.1-8b-instruct":
             for response in get_response_from_cloudflare(prompt="", context=other_context, query=message, num_calls=num_calls, temperature=temperature, search_type="document"):
                 yield response
         else: