From 684e899812b7749f95ce060d55561ee448e399bb Mon Sep 17 00:00:00 2001
From: Vishnu Raj <88265715+creator-10@users.noreply.github.com>
Date: Mon, 2 Dec 2024 22:42:17 +0530
Subject: [PATCH 01/13] Create readme.txt

---
 hackathon/AI-Care(novathon)/readme.txt | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 hackathon/AI-Care(novathon)/readme.txt

diff --git a/hackathon/AI-Care(novathon)/readme.txt b/hackathon/AI-Care(novathon)/readme.txt
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/hackathon/AI-Care(novathon)/readme.txt
@@ -0,0 +1 @@
+

From 1a93cd8e4f5ac6d3d698fc50bfd0ed12a1d0f47d Mon Sep 17 00:00:00 2001
From: Vishnu Raj <88265715+creator-10@users.noreply.github.com>
Date: Mon, 2 Dec 2024 22:44:45 +0530
Subject: [PATCH 02/13] Added python file

---
 hackathon/AI-Care(novathon)/app.py            | 57 +++++++++++++
 hackathon/AI-Care(novathon)/google_api_key.py |  1 +
 hackathon/AI-Care(novathon)/invoiceDecoder.py | 78 ++++++++++++++++++
 hackathon/AI-Care(novathon)/scanAi.py         | 79 +++++++++++++++++++
 4 files changed, 215 insertions(+)
 create mode 100644 hackathon/AI-Care(novathon)/app.py
 create mode 100644 hackathon/AI-Care(novathon)/google_api_key.py
 create mode 100644 hackathon/AI-Care(novathon)/invoiceDecoder.py
 create mode 100644 hackathon/AI-Care(novathon)/scanAi.py

diff --git a/hackathon/AI-Care(novathon)/app.py b/hackathon/AI-Care(novathon)/app.py
new file mode 100644
index 00000000..f1fdeb61
--- /dev/null
+++ b/hackathon/AI-Care(novathon)/app.py
@@ -0,0 +1,57 @@
+import subprocess
+import re
+import webbrowser
+from flask import Flask, render_template
+
+app = Flask(__name__)
+
+# Route to display the buttons
+@app.route('/')
+def index():
+    return render_template('index.html')
+
+# Route for About Us page (about_us.html)
+@app.route('/about_us')
+def about_us():
+    return render_template('about_us.html')
+
+# Route for Scan AI page (scan_ai.html)
+@app.route('/scan_ai')
+def scan_ai():
+    return render_template('scan_ai.html')
+
+# Route for Invoice Decoder page (invoice_decoder.html)
+@app.route('/invoice_decoder')
+def invoice_decoder():
+    return render_template('invoice_decoder.html')
+
+# Route for Consultation Analysis page (consultation_analysis.html)
+@app.route('/consultation_analysis')
+def consultation_analysis():
+    return render_template('consultation_analysis.html')
+
+# Route to run scanAi.py with Streamlit
+@app.route('/run_button1', methods=['POST'])
+def run_button1():
+    try:
+        # Run the scanAi.py Streamlit app in the background
+        subprocess.Popen(['streamlit', 'run', 'scanAi.py'])
+    except Exception as e:
+        print(f"Error running scanAi.py with Streamlit: {str(e)}")
+    # Stay on the index page without any message
+    return render_template('index.html')
+
+# Route to run invoiceDecoder.py with Streamlit
+@app.route('/run_button2', methods=['POST'])
+def run_button2():
+    try:
+        # Run the invoiceDecoder.py Streamlit app in the background
+        subprocess.Popen(['streamlit', 'run', 'invoiceDecoder.py'])
+    except Exception as e:
+        print(f"Error running invoiceDecoder.py with Streamlit: {str(e)}")
+    # Stay on the index page without any message
+    return render_template('index.html')
+
+
+if __name__ == "__main__":
+    app.run(debug=True)
diff --git a/hackathon/AI-Care(novathon)/google_api_key.py b/hackathon/AI-Care(novathon)/google_api_key.py
new file mode 100644
index 00000000..b929957f
--- /dev/null
+++ b/hackathon/AI-Care(novathon)/google_api_key.py
@@ -0,0 +1 @@
+google_api_key='AIzaSyB5IM3-8h8SljeUWXl-58pceOev8bON4hU'
\ No newline at end of file
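google_api_key.py stores the key as a literal, while invoiceDecoder.py below reads GOOGLE_API_KEY from the environment via python-dotenv. A minimal sketch of that environment-based setup, assuming a local .env file that defines GOOGLE_API_KEY (the .env file is not part of these patches):

import os

from dotenv import load_dotenv
import google.generativeai as genai

load_dotenv()  # pulls GOOGLE_API_KEY from a local .env file into the process environment
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))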
diff --git a/hackathon/AI-Care(novathon)/invoiceDecoder.py b/hackathon/AI-Care(novathon)/invoiceDecoder.py
new file mode 100644
index 00000000..3013b15d
--- /dev/null
+++ b/hackathon/AI-Care(novathon)/invoiceDecoder.py
@@ -0,0 +1,78 @@
+from dotenv import load_dotenv
+import streamlit as st
+import os
+from PIL import Image
+import google.generativeai as genai
+from llmware.models import ModelCatalog  # Import llmware model
+
+# Load environment variables
+load_dotenv()  # take environment variables from .env
+
+# Configure Google Generative AI
+genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+# Function to get response from Gemini model
+def get_gemini_response(input, image, prompt):
+    model = genai.GenerativeModel('gemini-1.5-flash')
+    response = model.generate_content([input, image[0], prompt])
+    return response.text
+
+# Function to process uploaded image
+def input_image_setup(uploaded_file):
+    if uploaded_file is not None:
+        bytes_data = uploaded_file.getvalue()
+        image_parts = [
+            {
+                "mime_type": uploaded_file.type,  # Get the mime type of the uploaded file
+                "data": bytes_data
+            }
+        ]
+        return image_parts
+    else:
+        raise FileNotFoundError("No file uploaded")
+
+# Initialize the Streamlit app
+st.set_page_config(page_title="Invoice Decoder")
+st.title("Invoice Decoder")
+st.subheader("Transforms invoices into readable text, simplifying billing across multiple languages.")
+input = st.text_input("Input Prompt: ", key="input")
+uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+image = ""
+
+if uploaded_file is not None:
+    image = Image.open(uploaded_file)
+    st.image(image, caption="Uploaded Image.", use_column_width=True)
+
+submit = st.button("Execute Prompt")
+
+input_prompt = """
+    You are an expert in understanding invoices.
+    You will receive input images as invoices &
+    you will have to answer questions based on the input image.
+    """
+
+# If the submit button is clicked
+if submit:
+    image_data = input_image_setup(uploaded_file)
+    # Get response from Gemini model
+    response = get_gemini_response(input_prompt, image_data, input)
+
+    # Display the Gemini response (detailed analysis of the invoice)
+    st.subheader("The Response from Gemini Model")
+    st.write(response)
+
+    # Now, integrate the llmware model to summarize the response
+    # Initialize llmware model for summarization
+    llm_model = ModelCatalog().load_model(selected_model="slim-summary-tool", sample=False, temperature=0.0, max_output=200)
+
+    # Call llmware model to summarize the response from Gemini model
+    summary_response = llm_model.function_call(response, function="summarize", params=["summary points (5)"])
+
+    # Assuming the summary response is a list of points in the "llm_response"
+    if "llm_response" in summary_response:
+        st.subheader("Summary of the Invoice")
+        # Loop through the summary response list and display each point
+        for i, point in enumerate(summary_response["llm_response"]):
+            st.write(f"{i + 1}. {point}")
+    else:
+        st.write("No summary available.")
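invoiceDecoder.py above and scanAi.py below both hand the Gemini text to llmware's slim-summary-tool and read the bullet points back from the "llm_response" key. A minimal standalone sketch of that call, reusing the exact load parameters from the two scripts; the sample text is purely illustrative and the model is assumed to be downloadable locally on first use:

from llmware.models import ModelCatalog

# Same model and parameters used in invoiceDecoder.py and scanAi.py
llm_model = ModelCatalog().load_model(selected_model="slim-summary-tool",
                                      sample=False, temperature=0.0, max_output=200)

# Illustrative input only; in the apps this is the Gemini response text
text = ("The invoice lists a consultation fee, two laboratory tests, and a pharmacy "
        "charge, with the total amount payable due within thirty days.")

summary_response = llm_model.function_call(text, function="summarize",
                                           params=["summary points (5)"])

# The scripts expect a dict with the summary points under "llm_response"
if "llm_response" in summary_response:
    for i, point in enumerate(summary_response["llm_response"]):
        print(f"{i + 1}. {point}")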
{point}") + else: + st.write("No summary available.") diff --git a/hackathon/AI-Care(novathon)/scanAi.py b/hackathon/AI-Care(novathon)/scanAi.py new file mode 100644 index 00000000..c12e6db5 --- /dev/null +++ b/hackathon/AI-Care(novathon)/scanAi.py @@ -0,0 +1,79 @@ +import streamlit as st +from pathlib import Path +import google.generativeai as genai +from google_api_key import google_api_key +from llmware.models import ModelCatalog + +# Configure Google API Key +genai.configure(api_key=google_api_key) + +# Streamlit App Setup +st.set_page_config(page_title="Scan AI", layout="wide") +st.title("Scan AI") +st.subheader("Upload medical images to get precise, AI-driven diagnoses for improved healthcare decisions.") + +# File upload widget +file_uploaded = st.file_uploader('Upload the image for Analysis', type=['png', 'jpg', 'jpeg']) + +# Set up the Gemini model +generation_config = { + "temperature": 1, + "top_p": 0.95, + "top_k": 0, + "max_output_tokens": 8192, +} + +safety_settings = [ + {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}, + {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}, + {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}, + {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}, +] + +system_prompts = [ + """ + You are a domain expert in medical image analysis. You are tasked with + examining medical images for a renowned hospital. + Your expertise will help in identifying or + discovering any anomalies, diseases, conditions or + any health issues that might be present in the image. + """ +] + +model = genai.GenerativeModel(model_name="gemini-1.5-flash", + generation_config=generation_config, + safety_settings=safety_settings) + +# Upload button and submission logic +if file_uploaded: + st.image(file_uploaded, width=200, caption='Uploaded Image') + + submit = st.button("Generate Analysis") + + if submit: + image_data = file_uploaded.getvalue() + image_parts = [{"mime_type": "image/jpg", "data": image_data}] + prompt_parts = [image_parts[0], system_prompts[0]] + + # Generate response from Gemini model + response = model.generate_content(prompt_parts) + + if response: + st.title('Detailed Analysis Based on the Uploaded Image') + st.write(response.text) + + # Now integrate llmware model to summarize the response + # Initialize llmware model for summarization + llm_model = ModelCatalog().load_model(selected_model="slim-summary-tool", sample=False, temperature=0.0, max_output=200) + + # Call llmware model to summarize the analysis response + summary_response = llm_model.function_call(response.text, function="summarize", params=["summary points (5)"]) + + # Assuming summary_response is a dictionary with a key "llm_response" containing a list of points + if "llm_response" in summary_response: + st.title('Summary of the Analysis') + # Loop through the summary response list and display each point + for i, point in enumerate(summary_response["llm_response"]): + st.write(f"{i+1}. 
{point}") + else: + st.write("No summary available.") From e3b51103640b89427a4d242213aca59e9f4e1f75 Mon Sep 17 00:00:00 2001 From: Vishnu Raj <88265715+creator-10@users.noreply.github.com> Date: Mon, 2 Dec 2024 22:54:13 +0530 Subject: [PATCH 03/13] Create about_us.html --- .../AI-Care(novathon)/templates/about_us.html | 116 ++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 hackathon/AI-Care(novathon)/templates/about_us.html diff --git a/hackathon/AI-Care(novathon)/templates/about_us.html b/hackathon/AI-Care(novathon)/templates/about_us.html new file mode 100644 index 00000000..9c770314 --- /dev/null +++ b/hackathon/AI-Care(novathon)/templates/about_us.html @@ -0,0 +1,116 @@ + + +
+At AI-CARE, we are dedicated to transforming healthcare through the power of artificial intelligence. Our platform integrates advanced technologies to enhance diagnostic accuracy, streamline communication, and improve patient care. By leveraging AI to analyze medical data, generate insightful reports, and automate critical workflows, we empower healthcare professionals to make informed decisions quickly and efficiently. With AI-CARE, we aim to simplify the healthcare experience, reduce administrative burdens, and ensure that every patient receives the highest quality care.
+Upload medical images to get precise, AI-driven diagnoses for improved healthcare decisions.
+
+Transforms invoices into readable text, simplifying billing across multiple languages.
+
+Our multilingual invoice reader automatically converts invoice details into clear, descriptive text. With support for multiple languages, it ensures that healthcare providers and patients can understand billing details with ease. This tool streamlines financial processes, reduces misunderstandings, and helps maintain transparent communication between doctors, patients, and pharmacies.
+
+Upload medical images and receive instant, AI-powered diagnoses using Google’s Gemini 1.5 Flash model. The system analyzes images to provide accurate insights into possible medical conditions. This tool enhances diagnostic accuracy, enabling healthcare providers to make informed decisions quickly. With advanced AI capabilities, it delivers fast and reliable results. Empowering healthcare professionals, it streamlines diagnostic workflows and improves patient care. Experience the future of medical analysis with cutting-edge technology.
+
+Consultation Analysis
+Summarizes doctor-patient talks in real time, providing clear, detailed medical reports.
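The Consultation Analysis page described above has no backing script in these patches yet. A minimal sketch of how its summarization step could be wired with the same Gemini client that scanAi.py and invoiceDecoder.py already use; the function name and prompt are hypothetical, and the transcript is assumed to arrive as plain text:

import google.generativeai as genai
from google_api_key import google_api_key

genai.configure(api_key=google_api_key)

def summarize_consultation(transcript: str) -> str:
    # gemini-1.5-flash is the model the other scripts in this project already use
    model = genai.GenerativeModel('gemini-1.5-flash')
    prompt = ("Summarize this doctor-patient consultation into a clear medical report "
              "covering symptoms, findings, and recommendations:\n\n" + transcript)
    response = model.generate_content(prompt)
    return response.text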
Result :