sharsh02 committed on
Commit 5fe8017
1 Parent(s): 68cf750

Update app.py

Files changed (1)
  1. app.py (+25, -2)
app.py CHANGED
@@ -21,6 +21,29 @@ client = InferenceClient(
 )
 # client = InferenceClient("google/gemma-2b-it")
 
+
+def latest_earning():
+    # URL of the webpage you want to scrape
+    url = "https://www.moneycontrol.com/markets/earnings/india-inc-earnings/?selected=all"
+
+    # Send a GET request to fetch the raw HTML content
+    response = requests.get(url)
+
+    # Parse the content using BeautifulSoup
+    soup = BeautifulSoup(response.content, "html.parser")
+
+    # Find all elements with the class rapidResCardWeb_blkTxtOne__cigbf
+    elements_with_class = soup.find_all(class_='rapidResCardWeb_blkTxtOne__cigbf')
+
+    # Iterate over all the elements found
+    for element in elements_with_class:
+        anchor_tag = element.find('a')  # Find the first anchor tag within each element
+        if anchor_tag and 'href' in anchor_tag.attrs:
+            href = anchor_tag['href']
+            earning_link.append(f"<a href='{href}'>{href.split('/')[-2]}</a>")
+
+    return (','.join(earning_link))
+
 def todays_news():
     url = 'https://trendlyne.com/markets-today/'
     print("getting news from", url)
@@ -317,7 +340,7 @@ def generate_final_response(prompt, history, ticker_financials=[None], context_f
     # top_p = float(top_p)
     global display_ticker
     generate_kwargs = dict(temperature=0.001,max_new_tokens=1024,top_p=0.95,repetition_penalty=1.0,do_sample=True,seed=42)
-
+    earning_link = latest_earning()
     today = datetime.date.today()
     todays_date = today.strftime('%d%B%Y')
     question = format_prompt(prompt, history)
@@ -346,7 +369,7 @@ def generate_final_response(prompt, history, ticker_financials=[None], context_f
     env = Environment(loader=FileSystemLoader("templates/"), autoescape=True)
     # env.globals['include'] = lambda filename: env.loader.get_source(env, filename)[0]
     template = env.get_template("system_prompt.txt")
-    content = template.render(todays_date=todays_date,ticker_financials=ticker_financials[0] ,response_type="Response-1",chat_completion_params=chat_completion_params,context_file=context_file[0], question=question,ticker=ticker, ticker_stats = ticker_stats[0], reports=reports[0], news_link=news_link[0])
+    content = template.render(todays_date=todays_date,ticker_financials=ticker_financials[0] ,response_type="Response-1",chat_completion_params=chat_completion_params,context_file=context_file[0], question=question,ticker=ticker, ticker_stats = ticker_stats[0], reports=reports[0], news_link=news_link[0], earnings = earning_link)
     print(content)
     output=""
     try:
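
For reference, here is a standalone sketch of the scraping pattern latest_earning() introduces: fetch the Moneycontrol earnings page with requests, parse it with BeautifulSoup, and turn each result card's anchor into an HTML link. Two caveats: the committed function appends to earning_link without defining it, so it presumably relies on a module-level list defined elsewhere in app.py (not visible in this diff); the sketch below initializes the list locally instead. The hard-coded CSS class rapidResCardWeb_blkTxtOne__cigbf is tied to Moneycontrol's current markup, so the scrape quietly returns an empty string if that class changes. The timeout and raise_for_status() calls are robustness additions, not part of the commit.

import requests
from bs4 import BeautifulSoup

def latest_earning():
    # Page listing the latest India Inc earnings cards on Moneycontrol
    url = "https://www.moneycontrol.com/markets/earnings/india-inc-earnings/?selected=all"

    # Fetch the raw HTML; the timeout keeps the handler from hanging on a slow response
    response = requests.get(url, timeout=10)
    response.raise_for_status()

    # Parse the content and select every element carrying the earnings-card class
    soup = BeautifulSoup(response.content, "html.parser")

    earning_link = []  # local list; the commit relies on a list defined elsewhere in app.py
    for element in soup.find_all(class_='rapidResCardWeb_blkTxtOne__cigbf'):
        anchor_tag = element.find('a')  # first anchor inside the card
        if anchor_tag and 'href' in anchor_tag.attrs:
            href = anchor_tag['href']
            # Label each link with the URL slug, e.g. ".../abc-ltd-earnings/" -> "abc-ltd-earnings"
            earning_link.append(f"<a href='{href}'>{href.split('/')[-2]}</a>")

    # Single comma-separated string, matching what the commit passes to the prompt template
    return ','.join(earning_link)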
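
The remaining two hunks wire the scrape into generate_final_response(): its result is captured as earning_link and passed to the Jinja2 system prompt under a new earnings variable. The actual templates/system_prompt.txt is not part of this diff, so the snippet below only illustrates the render pattern with a made-up inline template; the placeholder wording is an assumption, and only the variable names come from the commit. Note also that the commit's Environment is created with autoescape=True, so the HTML anchors passed in earnings will be escaped in the rendered prompt unless the template marks them safe.

import datetime
from jinja2 import Template

# Hypothetical stand-in for templates/system_prompt.txt (the real template is not in this diff)
template = Template(
    "Today is {{ todays_date }}.\n"
    "Latest earnings results: {{ earnings }}\n"
    "Question: {{ question }}\n"
)

# e.g. the comma-separated string returned by latest_earning()
earning_link = "<a href='https://example.com/abc-ltd-earnings/'>abc-ltd-earnings</a>"

content = template.render(
    todays_date=datetime.date.today().strftime('%d%B%Y'),
    earnings=earning_link,
    question="How did ABC Ltd do this quarter?",
)
print(content)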