diff --git a/Amongus_Character/among.py b/Amongus_Character/among.py deleted file mode 100644 index a406505..0000000 --- a/Amongus_Character/among.py +++ /dev/null @@ -1,99 +0,0 @@ -import turtle - -body_color = 'cyan' -glass_color = '#9acedc' - -s = turtle.getscreen() -t = turtle.Turtle() - -def body(): - t.pensize(20) - t.fillcolor(body_color) - t.begin_fill() - t.right(90) - t.forward(50) - t.right(180) - t.circle(40, -180) - t.right(180) - t.forward(200) - - t.right(180) - t.circle(100, -180) - - t.backward(20) - t.left(15) - t.circle(500, -20) - t.backward(20) - - t.circle(40, -180) - t.left(7) - t.backward(50) - - t.up() - t.left(90) - t.forward(10) - t.right(90) - t.down() - - t.right(240) - t.circle(50, -70) - - t.end_fill() - -def glass(): - t.up() - t.right(230) - t.forward(100) - t.left(90) - t.forward(20) - t.right(90) - - t.down() - t.fillcolor(glass_color) - t.begin_fill() - - t.right(150) - t.circle(90, -55) - - t.right(180) - t.forward(1) - t.right(180) - t.circle(10, -65) - t.right(180) - t.forward(110) - t.right(180) - - t.circle(50, -190) - t.right(170) - t.forward(80) - - t.right(180) - t.circle(45, -30) - - t.end_fill() - -def backpack(): - t.up() - t.right(60) - t.forward(100) - t.right(90) - t.forward(75) - - t.fillcolor(body_color) - t.begin_fill() - - t.down() - t.forward(30) - t.right(255) - - t.circle(300, -30) - t.right(260) - t.forward(30) - - t.end_fill() - -body() -glass() -backpack() - -t.screen.exitonclick() diff --git a/Cartoonize/cartoon.py b/Cartoonize/cartoon.py deleted file mode 100644 index 14f180f..0000000 --- a/Cartoonize/cartoon.py +++ /dev/null @@ -1,22 +0,0 @@ -import cv2 -import numpy as np -from tkinter.filedialog import * - -photo = askopenfilename() -img = cv2.imread(photo) - -grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) -grey = cv2.medianBlur(grey, 5) -edges = cv2.adaptiveThreshold(grey, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9) - -#cartoonize -color = cv2.bilateralFilter(img, 9, 250, 250) -cartoon = cv2.bitwise_and(color, color, mask = edges) - -cv2.imshow("Image", img) -cv2.imshow("Cartoon", cartoon) - -#save -cv2.imwrite("cartoon.jpg", cartoon) -cv2.waitKey(0) -cv2.destroyAllWindows() \ No newline at end of file diff --git a/CoronaTrackerAPI/corona.py b/CoronaTrackerAPI/corona.py deleted file mode 100644 index a450503..0000000 --- a/CoronaTrackerAPI/corona.py +++ /dev/null @@ -1,38 +0,0 @@ -import tkinter as tk -import requests -import datetime - -def getCovidData(): - api = "https://disease.sh/v3/covid-19/all" - json_data = requests.get(api).json() - total_cases = str(json_data['cases']) - total_deaths = str(json_data['deaths']) - today_cases = str(json_data['todayCases']) - today_deaths = str(json_data['todayDeaths']) - today_recovered = str(json_data['todayRecovered']) - updated_at = json_data['updated'] - date = datetime.datetime.fromtimestamp(updated_at/1e3) - label.config(text = "Total Cases: "+total_cases+ - "\n"+"Total Deaths: "+total_deaths+ - "\n"+"Today Cases: "+today_cases+ - "\n"+"Today Deaths: "+today_deaths+ - "\n"+"Today Recovered: "+today_recovered) - - label2.config(text = date) -canvas = tk.Tk() -canvas.geometry("400x400") -canvas.title("Corona Tracker App") - -f = ("poppins", 15, "bold") - -button = tk.Button(canvas, font = f, text = "Load", command = getCovidData) -button.pack(pady = 20) - -label = tk.Label(canvas, font = f) -label.pack(pady=20) - -label2 = tk.Label(canvas, font = 8) -label2.pack() -getCovidData() - -canvas.mainloop() \ No newline at end of file diff --git a/Hand Gesture 
Detection/Explaination.pdf b/Hand Gesture Detection/Explaination.pdf new file mode 100644 index 0000000..ac4692d Binary files /dev/null and b/Hand Gesture Detection/Explaination.pdf differ diff --git a/Hand Gesture Detection/app.py b/Hand Gesture Detection/app.py new file mode 100644 index 0000000..09bf38f --- /dev/null +++ b/Hand Gesture Detection/app.py @@ -0,0 +1,112 @@ +import cv2 +import mediapipe as mp + +# Initialize Mediapipe hands and drawing utilities +mp_hands = mp.solutions.hands +mp_drawing = mp.solutions.drawing_utils + +# Function to determine if hand is open +def is_open_hand(landmarks): + thumb_tip = landmarks[mp_hands.HandLandmark.THUMB_TIP] + index_tip = landmarks[mp_hands.HandLandmark.INDEX_FINGER_TIP] + middle_tip = landmarks[mp_hands.HandLandmark.MIDDLE_FINGER_TIP] + ring_tip = landmarks[mp_hands.HandLandmark.RING_FINGER_TIP] + pinky_tip = landmarks[mp_hands.HandLandmark.PINKY_TIP] + + # Check if all fingers are above the base of the hand + return (index_tip.y < thumb_tip.y and + middle_tip.y < thumb_tip.y and + ring_tip.y < thumb_tip.y and + pinky_tip.y < thumb_tip.y) + +# Function to determine if the gesture is a thumbs up +def is_thumbs_up(landmarks): + thumb_tip = landmarks[mp_hands.HandLandmark.THUMB_TIP] + index_tip = landmarks[mp_hands.HandLandmark.INDEX_FINGER_TIP] + + # Check if the thumb is extended and the index is down + return (thumb_tip.y < index_tip.y) + +# Function to determine if the gesture is a peace sign +def is_peace_sign(landmarks): + index_tip = landmarks[mp_hands.HandLandmark.INDEX_FINGER_TIP] + middle_tip = landmarks[mp_hands.HandLandmark.MIDDLE_FINGER_TIP] + ring_tip = landmarks[mp_hands.HandLandmark.RING_FINGER_TIP] + pinky_tip = landmarks[mp_hands.HandLandmark.PINKY_TIP] + + # Check if index and middle fingers are up while others are down + return (index_tip.y < middle_tip.y and + ring_tip.y > middle_tip.y and + pinky_tip.y > middle_tip.y) + + +# Function to determine if the gesture is a two fingers up +def is_two_fingers_up(landmarks): + index_tip = landmarks[mp_hands.HandLandmark.INDEX_FINGER_TIP] + middle_tip = landmarks[mp_hands.HandLandmark.MIDDLE_FINGER_TIP] + thumb_tip = landmarks[mp_hands.HandLandmark.THUMB_TIP] + ring_tip = landmarks[mp_hands.HandLandmark.RING_FINGER_TIP] + pinky_tip = landmarks[mp_hands.HandLandmark.PINKY_TIP] + + # Check if index and middle fingers are up while others are down + return (index_tip.y < thumb_tip.y and + middle_tip.y < thumb_tip.y and + ring_tip.y > thumb_tip.y and + pinky_tip.y > thumb_tip.y) + +# Start video capture +cap = cv2.VideoCapture(0) + +with mp_hands.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.7) as hands: + while cap.isOpened(): + ret, frame = cap.read() + if not ret: + print("Error: Failed to capture video.") + break + + # Convert the frame to RGB + image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + image.flags.writeable = False + + # Process the image and detect hands + results = hands.process(image) + + # Convert back to BGR for rendering + image.flags.writeable = True + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + + if results.multi_hand_landmarks: + for hand_landmarks in results.multi_hand_landmarks: + # Draw hand landmarks + mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS) + + # Determine gesture + if is_open_hand(hand_landmarks.landmark): + gesture_text = 'Open Hand' + gesture_color = (0, 255, 0) # Green + elif is_thumbs_up(hand_landmarks.landmark): + gesture_text = 'Thumbs Up' + gesture_color = (255, 255, 0) # Cyan + elif 
is_peace_sign(hand_landmarks.landmark): + gesture_text = 'Peace Sign' + gesture_color = (255, 0, 255) # Magenta + elif is_two_fingers_up(hand_landmarks.landmark): + gesture_text = 'Two Fingers Up' + gesture_color = (128, 0, 128) # Purple + else: + gesture_text = 'Fist' + gesture_color = (0, 0, 255) # Red + + # Display gesture text on the image + cv2.putText(image, gesture_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, gesture_color, 2) + + # Display the resulting frame + cv2.imshow('Hand Gesture Detection', image) + + # Break the loop on 'q' key press + if cv2.waitKey(1) & 0xFF == ord('q'): + break + +# Release the capture and destroy windows +cap.release() +cv2.destroyAllWindows() diff --git a/Language Detection/Explaination.pdf b/Language Detection/Explaination.pdf new file mode 100644 index 0000000..5588ba3 Binary files /dev/null and b/Language Detection/Explaination.pdf differ diff --git a/Language Detection/LangDetect.py b/Language Detection/LangDetect.py new file mode 100644 index 0000000..ddb44b4 --- /dev/null +++ b/Language Detection/LangDetect.py @@ -0,0 +1,188 @@ +import tkinter as tk +from tkinter import filedialog +import string +import pandas as pd +import matplotlib.pyplot as plt +import seaborn as sns +from statistics import mean +from googletrans import Translator + +# Constants for column names +CNST_TEXT_LINE_NAME = "Input Text" +CNST_TEST_LANGDETECT_LANG_LINE_NAME = "LangDetect: Detected Language" +CNST_TEST_LANGDETECT_PROB_LINE_NAME = "LangDetect: Confidence Score" +CNST_TEST_LANGID_LANG_LINE_NAME = "LangID: Detected Language" +CNST_TEST_LANGID_PROB_LINE_NAME = "LangID: Confidence Score" + +# Output file names +CNST_LANG_DETECTION_RESULTS_FILE_NAME = "Language_Detection_Results.csv" +CNST_LANG_DETECTION_DIFFERENCES_FILE_NAME = "Detection_Differences.csv" + +# Functions for detection +def detect_language_with_langdetect(line): + from langdetect import detect_langs + try: + langs = detect_langs(line) + for item in langs: + return item.lang, item.prob + except: + return "Unknown", 0.0 + +def detect_language_with_langid(line): + from langid import classify + lang, prob = classify(line) + return lang, prob + +def translate_text_to_english(text): + # Initialize the Google Translator + translator = Translator() + try: + # Translate the text to English + translated = translator.translate(text, dest='en') + print("") + return translated.text + except Exception as e: + return f"Error in translation: {e}" + +def write_data_to_csv(lines, langdetect_lang_results, langdetect_prob_results, langid_lang_results, langid_prob_results): + data = { + CNST_TEXT_LINE_NAME: lines, + CNST_TEST_LANGDETECT_LANG_LINE_NAME: langdetect_lang_results, + CNST_TEST_LANGDETECT_PROB_LINE_NAME: langdetect_prob_results, + CNST_TEST_LANGID_LANG_LINE_NAME: langid_lang_results, + CNST_TEST_LANGID_PROB_LINE_NAME: langid_prob_results + } + df = pd.DataFrame(data) + df.to_csv(CNST_LANG_DETECTION_RESULTS_FILE_NAME, sep="|", index=False) + +def extract_differences(file_name): + df = pd.read_csv(file_name, sep="|") + differences = df[df[CNST_TEST_LANGDETECT_LANG_LINE_NAME] != df[CNST_TEST_LANGID_LANG_LINE_NAME]] + differences.to_csv(CNST_LANG_DETECTION_DIFFERENCES_FILE_NAME, sep="|", index=False) + +# Visualization for Non-Technical Users +def show_results_for_non_tech_users(file_name): + df = pd.read_csv(file_name, sep="|") + sns.set_theme(style="whitegrid") + + print("\nLanguage Detection Results Summary") + print("=" * 50) + + # Display simple text-based insights + detected_languages_langdetect = 
df[CNST_TEST_LANGDETECT_LANG_LINE_NAME].value_counts() + detected_languages_langid = df[CNST_TEST_LANGID_LANG_LINE_NAME].value_counts() + + print("\nMost Common Languages Detected (LangDetect):") + print(detected_languages_langdetect) + + print("\nMost Common Languages Detected (LangID):") + print(detected_languages_langid) + + # Create visualizations + fig, axes = plt.subplots(2, 2, figsize=(16, 12)) + + # Pie chart for LangDetect + axes[0, 0].set_title("LangDetect - Detected Languages", fontsize=14) + detected_languages_langdetect.plot.pie( + autopct='%1.1f%%', + startangle=90, + ax=axes[0, 0], + colors=sns.color_palette("pastel"), + ) + axes[0, 0].set_ylabel('') + + # Pie chart for LangID + axes[0, 1].set_title("LangID - Detected Languages", fontsize=14) + detected_languages_langid.plot.pie( + autopct='%1.1f%%', + startangle=90, + ax=axes[0, 1], + colors=sns.color_palette("pastel"), + ) + axes[0, 1].set_ylabel('') + + # Bar chart for LangDetect confidence + langdetect_mean_probs = df.groupby(CNST_TEST_LANGDETECT_LANG_LINE_NAME)[CNST_TEST_LANGDETECT_PROB_LINE_NAME].mean() + langdetect_mean_probs.sort_values().plot.barh(ax=axes[1, 0], color="skyblue") + axes[1, 0].set_title("LangDetect - Mean Confidence Score", fontsize=14) + axes[1, 0].set_xlabel("Mean Confidence") + + # Bar chart for LangID confidence + langid_mean_probs = df.groupby(CNST_TEST_LANGID_LANG_LINE_NAME)[CNST_TEST_LANGID_PROB_LINE_NAME].mean() + langid_mean_probs.sort_values().plot.barh(ax=axes[1, 1], color="skyblue") + axes[1, 1].set_title("LangID - Mean Confidence Score", fontsize=14) + axes[1, 1].set_xlabel("Mean Confidence") + + plt.tight_layout() + plt.savefig("Language_Detection_Visualization.png") + plt.show() + + print("\nCharts saved as 'Language_Detection_Visualization.png'.") + print("Detailed results are saved in 'Language_Detection_Results.csv'.") + print("Any differences are saved in 'Detection_Differences.csv'.") + +# Tkinter GUI for showing translations +def show_translation_window(original_texts, translated_texts): + # Create a new window (Toplevel) + translation_window = tk.Toplevel() + translation_window.title("Translation Results") + + # Create a scrollable text box to display the input and translated texts + text_box = tk.Text(translation_window, wrap=tk.WORD, width=80, height=20) + text_box.pack(padx=10, pady=10) + + # Display each original and translated pair + for original, translated in zip(original_texts, translated_texts): + text_box.insert(tk.END, f"Original: {original}\n") + text_box.insert(tk.END, f"Translated: {translated}\n") + text_box.insert(tk.END, "=" * 50 + "\n") + + text_box.config(state=tk.DISABLED) + +# Main Workflow +def main(): + # Initialize Tkinter root window (hidden) + root = tk.Tk() + root.withdraw() + + # Ask user to select a file + file_path = filedialog.askopenfilename() + with open(file_path, 'r', encoding='utf-8') as file: + lines = file.readlines() + + langdetect_lang_results = [] + langdetect_prob_results = [] + langid_lang_results = [] + langid_prob_results = [] + raw_text = [] + translated_texts = [] + + # Process lines + for line in lines: + line = line.translate(str.maketrans('', '', string.punctuation)).rstrip() + raw_text.append(line) + + # Translate text to English + translated_text = translate_text_to_english(line) + translated_texts.append(translated_text) + + langdetect_lang, langdetect_prob = detect_language_with_langdetect(line) + langdetect_lang_results.append(langdetect_lang) + langdetect_prob_results.append(langdetect_prob) + + langid_lang, langid_prob = 
detect_language_with_langid(line) + langid_lang_results.append(langid_lang) + langid_prob_results.append(langid_prob) + + # Show translation window + show_translation_window(raw_text, translated_texts) + + # Save results to CSV + write_data_to_csv(raw_text, langdetect_lang_results, langdetect_prob_results, langid_lang_results, langid_prob_results) + extract_differences(CNST_LANG_DETECTION_RESULTS_FILE_NAME) + show_results_for_non_tech_users(CNST_LANG_DETECTION_RESULTS_FILE_NAME) + + root.mainloop() + +if __name__ == "__main__": + main() diff --git a/Language Detection/Langauages Supported.txt b/Language Detection/Langauages Supported.txt new file mode 100644 index 0000000..633895d --- /dev/null +++ b/Language Detection/Langauages Supported.txt @@ -0,0 +1,88 @@ +1. LangDetect (Based on the langdetect package) +The LangDetect library supports a variety of languages. The full list includes, but is not limited to: + +English +Spanish +French +German +Italian +Portuguese +Dutch +Russian +Chinese (Simplified) +Japanese +Korean +Arabic +Hindi +Turkish +Swedish +Finnish +Danish +Polish +Norwegian +Czech +Greek +Romanian +Hungarian +Thai +Indonesian +Bengali +Malay +Ukrainian +Hebrew +Vietnamese +Tamil +Telugu +Swahili +Filipino (Tagalog) +Punjabi +LangDetect supports over 55 languages in total. + +2. LangID (Based on the langid package) +The LangID library is slightly more limited but still supports many common languages. Here's a list of languages it detects: + +English +Spanish +French +German +Italian +Portuguese +Dutch +Russian +Arabic +Chinese (Simplified) +Japanese +Korean +Hindi +Turkish +Swedish +Danish +Polish +Finnish +Norwegian +Greek +Czech +Hungarian +Thai +Indonesian +Bengali +Swahili +Tamil +Telugu +Filipino (Tagalog) +LangID supports a smaller set of approximately 97 languages. 
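For reference, the two one-line detection calls that LangDetect.py above relies on can be compared side by side. This is a minimal sketch, assuming both packages are installed; the sample string is illustrative, and note that langid's default score is a raw log-probability rather than a 0-1 confidence (langid also offers normalized probabilities via its LanguageIdentifier class):

from langdetect import detect_langs  # returns ranked candidates with .lang and .prob
from langid import classify          # returns a single (language, score) tuple

sample = "Bonjour tout le monde"
# langdetect is nondeterministic unless langdetect.DetectorFactory.seed is set
print(detect_langs(sample))  # e.g. [fr:0.9999...], probabilities in [0, 1]
print(classify(sample))      # e.g. ('fr', -54.4), a raw log-probability score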
+ +Summary of Supported Languages +Both libraries can detect a variety of languages, including major ones like: + +English +Spanish +French +German +Italian +Chinese (Simplified & Traditional) +Japanese +Arabic +Hindi +Russian +Portuguese \ No newline at end of file diff --git a/Notes & Task Prioritizer/app.py b/Notes & Task Prioritizer/app.py new file mode 100644 index 0000000..f945b34 --- /dev/null +++ b/Notes & Task Prioritizer/app.py @@ -0,0 +1,146 @@ +import tkinter as tk +from tkinter import messagebox, ttk +from datetime import datetime + +class TodoApp(tk.Tk): + def __init__(self): + super().__init__() + + # Window setup + self.title('Material You To-Do List') + self.geometry('600x550') + self.configure(bg='#F3F4F6') # Light background color + self.resizable(False, False) + + self.tasks = [] + + # Title + self.title_label = tk.Label(self, text="To-Do List", font=('Roboto', 24, 'bold'), bg='#F3F4F6', fg='#333') + self.title_label.pack(pady=20) + + # Input Frame + self.input_frame = tk.Frame(self, bg='#F3F4F6') + self.input_frame.pack(pady=5) + + self.task_input = tk.Entry(self.input_frame, width=22, font=('Roboto', 14), bd=0, relief='flat', bg='#E0E0E0', fg='#333') + self.task_input.pack(side='left', padx=5, pady=5) + + # Priority Dropdown + self.priority_var = tk.StringVar(value='Normal') + self.priority_dropdown = ttk.Combobox(self.input_frame, textvariable=self.priority_var, + values=["Low", "Normal", "High"], width=10, state='readonly') + self.priority_dropdown.pack(side='left', padx=5, pady=5) + + self.btn_add_task = tk.Button(self.input_frame, text="Add Task", command=self.add_task, + bg='#6200EE', fg='white', font=('Roboto', 12), borderwidth=0, relief='flat', padx=10, pady=5) + self.btn_add_task.pack(side='right', padx=5, pady=5) + + # Listbox Frame with Scrollbar + self.list_frame = tk.Frame(self) + self.list_frame.pack(pady=10) + + self.lb_tasks = tk.Listbox(self.list_frame, width=50, height=12, font=('Roboto', 12), + bg='#FFFFFF', fg='#333', selectbackground='#D1E7DD', borderwidth=0) + self.lb_tasks.pack(side='left') + + self.scrollbar = tk.Scrollbar(self.list_frame) + self.scrollbar.pack(side='right', fill='y') + + self.lb_tasks.config(yscrollcommand=self.scrollbar.set) + self.scrollbar.config(command=self.lb_tasks.yview) + + # Button Frame + self.button_frame = tk.Frame(self, bg='#F3F4F6') + self.button_frame.pack(pady=10) + + # Task Buttons + self.btn_delete = tk.Button(self.button_frame, text="Delete", command=self.delete_task, + bg='#FF5252', fg='white', font=('Roboto', 12), borderwidth=0, relief='flat', width=12) + self.btn_delete.pack(side='left', padx=5, pady=5) + + self.btn_delete_all = tk.Button(self.button_frame, text="Delete All", command=self.delete_all_tasks, + bg='#FF5252', fg='white', font=('Roboto', 12), borderwidth=0, relief='flat', width=12) + self.btn_delete_all.pack(side='left', padx=5, pady=5) + + # Save Buttons + self.btn_save = tk.Button(self.button_frame, text="Save Task", command=self.save_selected_task, + bg='#03A9F4', fg='white', font=('Roboto', 12), borderwidth=0, relief='flat', width=12) + self.btn_save.pack(side='left', padx=5, pady=5) + + self.btn_save_all = tk.Button(self.button_frame, text="Save All Tasks", command=self.save_all_tasks, + bg='#03A9F4', fg='white', font=('Roboto', 12), borderwidth=0, relief='flat', width=12) + self.btn_save_all.pack(side='left', padx=5, pady=5) + + # Task Count Label + self.task_count_label = tk.Label(self, text="", bg='#F3F4F6', font=('Roboto', 12)) + self.task_count_label.pack(pady=5) + + 
self.update_task_count() + + def add_task(self): + """Add a new task to the list.""" + task_text = self.task_input.get().strip() + task_priority = self.priority_var.get() + + if task_text: + time_added = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + self.tasks.append({'task': task_text, 'time_added': time_added, 'priority': task_priority}) + self.update_listbox() + self.task_input.delete(0, 'end') + self.update_task_count() + else: + messagebox.showwarning("Input Error", "Please enter a task.") + + def delete_task(self): + """Delete the selected task.""" + selected_task_index = self.lb_tasks.curselection() + if selected_task_index: + del self.tasks[selected_task_index[0]] + self.update_listbox() + self.update_task_count() + else: + messagebox.showwarning("Selection Error", "Please select a task to delete.") + + def delete_all_tasks(self): + """Clear all tasks from the list.""" + self.tasks = [] + self.update_listbox() + self.update_task_count() + + def save_selected_task(self): + """Save the selected task to a file.""" + selected_task_index = self.lb_tasks.curselection() + if selected_task_index: + task = self.tasks[selected_task_index[0]] + with open("./saved_task.txt", "w") as f: + f.write(f"Task: {task['task']}\nPriority: {task['priority']}\nAdded: {task['time_added']}\n") + messagebox.showinfo("Save Task", "Selected task saved to 'saved_task.txt'") + else: + messagebox.showwarning("Selection Error", "Please select a task to save.") + + def save_all_tasks(self): + """Save all tasks to a file.""" + if self.tasks: + with open("all_tasks.txt", "w") as f: + for task in self.tasks: + f.write(f"Task: {task['task']}\nPriority: {task['priority']}\nAdded: {task['time_added']}\n\n") + messagebox.showinfo("Save All Tasks", "All tasks saved to 'all_tasks.txt'") + else: + messagebox.showwarning("No Tasks", "No tasks available to save.") + + def update_listbox(self): + """Update the task list display.""" + self.lb_tasks.delete(0, "end") + for task in self.tasks: + task_display = f"{task['task']} | Priority: {task['priority']} | Added: {task['time_added']}" + self.lb_tasks.insert("end", task_display) + + def update_task_count(self): + """Update the task count label.""" + task_count = len(self.tasks) + self.task_count_label.config(text=f"Number of tasks: {task_count}") + + +if __name__ == '__main__': + app = TodoApp() + app.mainloop() diff --git a/Sentiment Chatbot/app.py b/Sentiment Chatbot/app.py new file mode 100644 index 0000000..81cd4c3 --- /dev/null +++ b/Sentiment Chatbot/app.py @@ -0,0 +1,255 @@ +import streamlit as st +from PIL import Image +import pytesseract +import cv2 +import numpy as np +from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer +import matplotlib.pyplot as plt +import matplotlib.patches as mpatches +import speech_recognition as sr +from langdetect import detect, DetectorFactory +import re +from spellchecker import SpellChecker +import requests +from bs4 import BeautifulSoup + +DetectorFactory.seed = 0 + +def preprocess_text(text): + text = re.sub(r'[^a-zA-Z\s]', '', text) + text = text.lower() + return text + +def correct_spelling(text): + spell = SpellChecker() + corrected_words = [] + for word in text.split(): + corrected_word = spell.correction(word) + if corrected_word is not None: + corrected_words.append(corrected_word) + else: + corrected_words.append(word) + return ' '.join(corrected_words) + +def detect_sentiment(input_text): + try: + if not input_text.strip(): + return "Please enter a paragraph.", "", {}, 0, 0 + + if detect(input_text) != 'en': + 
return "Please provide text in English.", "", {}, 0, 0 + + input_text = preprocess_text(input_text) + input_text = correct_spelling(input_text) + + sid_obj = SentimentIntensityAnalyzer() + sentiment_scores = sid_obj.polarity_scores(input_text) + word_sentiments = {} + positive_words = [] + negative_words = [] + + for word in input_text.split(): + word_sentiment = sid_obj.polarity_scores(word)['compound'] + word_sentiments[word] = word_sentiment + if word_sentiment > 0: + positive_words.append(word) + elif word_sentiment < 0: + negative_words.append(word) + + total_words = len(positive_words) + len(negative_words) + if total_words == 0: + return ("Can't provide text sentiment due to the following reasons:\n" + "1. Check if the text is in English.\n" + "2. Check the spellings of words for more accurate results."), "", {}, 0, 0 + + positive_percentage = len(positive_words) / total_words * 100 + negative_percentage = len(negative_words) / total_words * 100 + + compound_score = sentiment_scores['compound'] + if compound_score >= 0.05: + overall_sentiment = 'Positive' + sentiment_emoji = '😊' + elif compound_score <= -0.05: + overall_sentiment = 'Negative' + sentiment_emoji = '😞' + else: + overall_sentiment = 'Neutral' + sentiment_emoji = '😐' + + num_positive_words = len(positive_words) + num_negative_words = len(negative_words) + + response_message = (f"
" + f"" + f"Based on the analysis of your input, the overall suggested paragraph/sentence is " + f"{overall_sentiment.lower()}.
\n\n") + st.write(f"Sentiment Emotion: {sentiment_emoji}") + response_message += "Percentage breakdown:\n" + response_message += f"- Positive: {positive_percentage:.2f}%\n" + response_message += f"- Negative: {negative_percentage:.2f}%\n\n" + response_message += "Here is your input:\n\n" + for word in input_text.split(): + if word in positive_words: + response_message += f"{word} " + elif word in negative_words: + response_message += f"{word} " + else: + response_message += f"{word} " + + labels = ['Positive', 'Negative'] + sizes = [positive_percentage, negative_percentage] + colors = ['#66c2a5', '#fc8d62'] + fig, ax = plt.subplots() + wedges, texts, autotexts = ax.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', startangle=140, wedgeprops={'edgecolor': 'white'}, textprops=dict(color="w")) + ax.axis('equal') + plt.title('Sentiment Breakdown', fontsize=14) + legend_labels = ['Positive', 'Negative'] + legend_handles = [mpatches.Patch(color=color, label=label) for color, label in zip(colors, legend_labels)] + plt.legend(handles=legend_handles, loc='best', fancybox=True, shadow=True, fontsize=10) + ax.spines['top'].set_visible(False) + ax.spines['right'].set_visible(False) + plt.tight_layout() + + for i, autotext in enumerate(autotexts): + autotext.set_color('white') + autotext.set_fontsize(12) + + st.pyplot(fig) + + return response_message, sentiment_emoji, word_sentiments, num_positive_words, num_negative_words + except Exception as e: + return f"An error occurred: {str(e)}", "", {}, 0, 0 + +def extract_text_from_image(uploaded_image): + img = Image.open(uploaded_image) + img = img.convert('RGB') + img_cv = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) + gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY) + text = pytesseract.image_to_string(gray) + return text + +def extract_text_from_url(url): + try: + response = requests.get(url) + if response.status_code == 200: + soup = BeautifulSoup(response.content, 'html.parser') + paragraphs = soup.find_all('p') + text = ' '.join([para.get_text() for para in paragraphs]) + return text + else: + return "Failed to retrieve the content from the URL." + except Exception as e: + return f"An error occurred while fetching the URL content: {str(e)}" + +def main(): + st.set_page_config(page_title="Sentiment Analysis", page_icon=":speech_balloon:") + + st.sidebar.info( + "Welcome to the Sentiment Analysis App! Feel free to express yourself by typing your message, uploading an image, speaking into the microphone, or providing a URL. Click 'Send' to analyze the sentiment and discover the emotions behind your words. Let's explore the sentiment together!" + ) + st.sidebar.markdown("---") + st.sidebar.info( + "Information Insights: This chat app uses VADER Sentiment Analysis to provide sentiment analysis of your messages." + ) + st.sidebar.markdown("---") + st.sidebar.subheader("Instructions:") + st.sidebar.write("1. Use the text area provided to input the text you want to analyze for sentiment. This could be a sentence, a paragraph, or any text you'd like to evaluate.") + st.sidebar.write("2. Alternatively, you can upload an image containing text. Our app will extract the text from the image and perform sentiment analysis on it.") + st.sidebar.write("3. You can also click on the microphone button to speak your message. Our app will transcribe your speech and perform sentiment analysis on it.") + st.sidebar.write("4. You can also provide a URL, and our app will extract the text from the webpage and perform sentiment analysis on it.") + st.sidebar.write("5. 
Once you've entered your text, uploaded an image, spoken into the microphone, or provided a URL, click the 'Send' button to initiate the sentiment analysis process.") + st.sidebar.markdown("---") + st.sidebar.subheader("Features:") + st.sidebar.write("- Sentiment analysis of user input.") + st.sidebar.write("- Sentiment emotion support in the form of emoji for easier understanding.") + st.sidebar.write("- Pie chart visualization for pictorial analysis.") + st.sidebar.write("- Positive & Negative Word Count for better Sentiment Analysis.") + st.sidebar.write("- Percentage-based breakdown of positive and negative sentence/paragraph.") + st.sidebar.write("- Individually highlighted text with different colors in summary.") + st.sidebar.write("- Supports offline mode for better accessibility.") + st.sidebar.markdown("---") + st.sidebar.subheader("Limitations:") + st.sidebar.write("- Currently supports only the English language.") + st.sidebar.write("- Sometimes may generate inaccurate output.") + + st.title("Welcome To Sentiment Analysis App") + st.markdown("---") + + input_option = st.radio("Select Input Option:", ("Text", "Image", "Microphone", "URL")) + + st.markdown("---") + + if input_option == "Text": + input_text = st.text_area("You:", "") + + if st.button("Send"): + response, sentiment_emoji, _, num_positive_words, num_negative_words = detect_sentiment(input_text) + st.markdown("---") + st.write(response, unsafe_allow_html=True) + st.markdown("---") + st.subheader("Word Count:") + st.write(f"Number of Positive Words: {num_positive_words}") + st.write(f"Number of Negative Words: {num_negative_words}") + + elif input_option == "Image": + uploaded_image = st.file_uploader("Upload Image:", type=["jpg", "jpeg", "png"]) + + if st.button("Send") and uploaded_image is not None: + extracted_text = extract_text_from_image(uploaded_image) + st.markdown("---") + st.write("Text Extracted from Image:") + st.write(extracted_text) + st.write("\n\n") + + response, sentiment_emoji, _, num_positive_words, num_negative_words = detect_sentiment(extracted_text) + st.write(response, unsafe_allow_html=True) + st.markdown("---") + st.subheader("Word Count:") + st.write(f"Number of Positive Words: {num_positive_words}") + st.write(f"Number of Negative Words: {num_negative_words}") + + elif input_option == "Microphone": + recognizer = sr.Recognizer() + with sr.Microphone() as source: + st.write("Speak now...") + audio_input = recognizer.listen(source) + + try: + st.write("Transcribing...") + spoken_text = recognizer.recognize_google(audio_input) + st.write("You said:") + st.write(spoken_text) + + response, sentiment_emoji, _, num_positive_words, num_negative_words = detect_sentiment(spoken_text) + st.markdown("---") + st.write(response, unsafe_allow_html=True) + st.markdown("---") + st.subheader("Word Count:") + st.write(f"Number of Positive Words: {num_positive_words}") + st.write(f"Number of Negative Words: {num_negative_words}") + + except sr.UnknownValueError: + st.write("Sorry, could not understand audio.") + except sr.RequestError as e: + st.write(f"Error: {e}") + + elif input_option == "URL": + url = st.text_input("Enter URL:") + + if st.button("Send"): + extracted_text = extract_text_from_url(url) + st.markdown("---") + st.write("Text Extracted from URL:") + st.write(extracted_text) + st.write("\n\n") + + response, sentiment_emoji, _, num_positive_words, num_negative_words = detect_sentiment(extracted_text) + st.write(response, unsafe_allow_html=True) + st.markdown("---") + st.subheader("Word Count:") + 
st.write(f"Number of Positive Words: {num_positive_words}") + st.write(f"Number of Negative Words: {num_negative_words}") + +if __name__ == "__main__": + main() diff --git a/Sentiment Chatbot/run command.txt b/Sentiment Chatbot/run command.txt new file mode 100644 index 0000000..d2989cc --- /dev/null +++ b/Sentiment Chatbot/run command.txt @@ -0,0 +1 @@ +python -m streamlit run diff --git a/Video2GIF/gif.py b/Video2GIF/gif.py deleted file mode 100644 index fc73fc6..0000000 --- a/Video2GIF/gif.py +++ /dev/null @@ -1,6 +0,0 @@ -from moviepy.editor import VideoFileClip -from tkinter.filedialog import * - -video = askopenfilename() -clip = VideoFileClip(video) -clip.write_gif("mygif.gif", fps=10) \ No newline at end of file diff --git a/Voice Assist & Navigations/Explaination.pdf b/Voice Assist & Navigations/Explaination.pdf new file mode 100644 index 0000000..d3f1c41 Binary files /dev/null and b/Voice Assist & Navigations/Explaination.pdf differ diff --git a/Voice Assist & Navigations/app.py b/Voice Assist & Navigations/app.py new file mode 100644 index 0000000..b1e05bb --- /dev/null +++ b/Voice Assist & Navigations/app.py @@ -0,0 +1,199 @@ +import speech_recognition as sr +import pyttsx3 +import webbrowser +import os +import subprocess +from datetime import datetime +import tkinter as tk +from tkinter import scrolledtext +import threading +import time +import re # For regex operations + +# Initialize recognizer and TTS engine +recognizer = sr.Recognizer() +tts_engine = pyttsx3.init() + +# List of available commands +AVAILABLE_COMMANDS = [ + "time - Say the current time", + "open browser - Open the web browser", + "open calculator - Open the calculator", + "open youtube - Open YouTube", + "search google - Search Google", + "shutdown - Shutdown the system", + "take a note - Save a voice note", + "perform - Perform a math operation", + "exit - Exit the application" +] + +# Global flag to control listening +listening = False + +def speak(text): + #Convert text to speech. + tts_engine.say(text) + tts_engine.runAndWait() + +def take_command(): + #Listen for a voice command and return it as text. + with sr.Microphone() as source: + recognizer.adjust_for_ambient_noise(source) + display_message("Listening...") + try: + audio = recognizer.listen(source, timeout=5) + command = recognizer.recognize_google(audio).lower() + display_message(f"You said: {command}") + return command + except sr.UnknownValueError: + speak("Sorry, I didn't catch that.") + display_message("Sorry, I didn't catch that.") + return "" + except sr.RequestError: + speak("Could not connect to the speech recognition service.") + display_message("Could not connect to the speech recognition service.") + return "" + except sr.WaitTimeoutError: + display_message("Listening timed out.") + return "" + +def run_command(command): + #Perform actions based on recognized commands. 
+ if "time" in command: + now = datetime.now().strftime("%H:%M") + speak(f"The time is {now}.") + display_message(f"The time is {now}.") + + elif "open browser" in command: + speak("Opening browser.") + display_message("Opening browser...") + webbrowser.open("https://www.google.com") + + elif "open calculator" in command: + speak("Opening calculator.") + display_message("Opening calculator...") + if os.name == "nt": #For Windows + subprocess.Popen("calc.exe") + elif os.name == "posix": #For MacOS + subprocess.Popen(["gnome-calculator"]) + + elif "open youtube" in command: + speak("Opening YouTube.") + display_message("Opening YouTube...") + webbrowser.open("https://www.youtube.com") + + elif "search google" in command: + query = command.replace("search google", "").strip() + speak(f"Searching Google for {query}.") + display_message(f"Searching Google for: {query}...") + webbrowser.open(f"https://www.google.com/search?q={query}") + + elif "shutdown" in command: + speak("Shutting down the system.") + display_message("Shutting down the system...") + if os.name == "nt": + os.system("shutdown /s /t 1") + elif os.name == "posix": + os.system("sudo shutdown now") + + elif "take a note" in command: + speak("What would you like to note down?") + note = take_command() + if note: + with open("notes.txt", "a") as file: + file.write(f"{note}\n") + speak("Your note has been saved.") + display_message("Your note has been saved.") + else: + speak("Please specify a valid duration.") + display_message("Please specify a valid duration.") + + elif "perform" in command: + expression = command.replace("perform", "").strip() + try: + result = eval(expression) + speak(f"The result is {result}.") + display_message(f"The result of {expression} is {result}.") + except Exception as e: + speak("There was an error performing the calculation.") + display_message("There was an error performing the calculation.") + + elif "exit" in command: + speak("Goodbye!") + display_message("Goodbye!") + root.quit() # Exit the GUI + + else: + speak("I'm not sure how to do that.") + display_message("I'm not sure how to do that.") + +def display_message(message): + """Display messages and commands in the text area.""" + text_area.insert(tk.END, f"{message}\n") + text_area.yview(tk.END) # Auto-scroll to the bottom + +def display_available_commands(): + """Display all available commands in the command list area.""" + command_list_area.insert(tk.END, "Available Commands:\n") + for cmd in AVAILABLE_COMMANDS: + command_list_area.insert(tk.END, f"- {cmd}\n") + command_list_area.config(state=tk.DISABLED) # Make it read-only + +def continuous_listen(): + """Continuously listen for commands with pauses between executions.""" + global listening + while True: + if listening: + command = take_command() + if command: + run_command(command) + time.sleep(2) # Pause for 2 seconds after each command + +def toggle_listening(): + """Start or stop the listening process.""" + global listening + listening = not listening + if listening: + speak("Listening started.") + display_message("Listening started...") + start_listening_button.config(text="Stop Listening") + else: + speak("Listening stopped.") + display_message("Listening stopped...") + start_listening_button.config(text="Start Listening") + +def start_listening_thread(): + """Start the listening loop in a separate thread.""" + listening_thread = threading.Thread(target=continuous_listen) + listening_thread.daemon = True # Ensure thread exits when the main program does + listening_thread.start() + +# 
Set up the Tkinter GUI +root = tk.Tk() +root.title("Real-Time Voice Assistant") +root.geometry("600x500") + +# Text area to display commands and messages +text_area = scrolledtext.ScrolledText(root, wrap=tk.WORD, height=10, width=60) +text_area.pack(pady=10) + +# Text area to display available commands (read-only) +command_list_area = scrolledtext.ScrolledText(root, wrap=tk.WORD, height=6, width=60) +command_list_area.pack(pady=10) + +# Button to start/stop listening +start_listening_button = tk.Button(root, text="Start Listening", command=toggle_listening, font=("Arial", 14)) +start_listening_button.pack(pady=10) + +# Display available commands on startup +display_available_commands() + +# Start the assistant with a greeting +speak("How can I help you?") +display_message("How can I help you?") + +# Start the continuous listening thread +start_listening_thread() + +# Run the GUI main loop +root.mainloop()
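One caveat worth flagging on the "perform" command above: eval() on free-form recognized speech can execute arbitrary Python. A minimal sketch of a safer arithmetic evaluator is below; safe_eval is a hypothetical helper, not part of this diff, and it only accepts numeric literals and basic operators:

import ast
import operator

# Map AST operator nodes to plain arithmetic functions.
_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.USub: operator.neg,
    ast.UAdd: operator.pos,
}

def safe_eval(expression):
    """Evaluate a plain arithmetic expression without eval()'s code-execution risk."""
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError(f"Unsupported expression: {expression!r}")
    return _eval(ast.parse(expression, mode="eval"))

print(safe_eval("2 + 3 * 4"))  # 14

Dropping this in for the eval(expression) call would keep "perform two plus two"-style commands working while rejecting anything that is not pure arithmetic.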