import customtkinter as ctk
import webbrowser
import requests
from bs4 import BeautifulSoup
import re
import json
import csv
from tkinter import filedialog
import unicodedata
from urllib.parse import urljoin
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import threading
from queue import Queue
import mysql.connector
from PIL import Image, ImageTk



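# Scrapy spider that crawls outward from a start URL, extracts email addresses
# from each page, and hands them to the GUI through a thread-safe queue.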
class EmailSpider(scrapy.Spider):
    name = 'email_spider'
    
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        start_url = kwargs.get('start_url')
        self.start_urls = [start_url] if start_url else []
        self.email_queue = kwargs.get('email_queue')
        self.visited_urls = set()
        self.allowed_domains = kwargs.get('allowed_domains', [])

    def parse(self, response):
        if response.url in self.visited_urls:
            return
        self.visited_urls.add(response.url)

        # Extract emails from the current page
        emails = self.extract_emails(response.text)
        for email in emails:
            self.email_queue.put(email)

        # Follow links to other pages
        for href in response.css('a::attr(href)').getall():
            full_url = response.urljoin(href)
            if full_url not in self.visited_urls and self.is_allowed_domain(full_url):
                yield scrapy.Request(full_url, callback=self.parse)

    def extract_emails(self, text):
        # Email regex pattern ('|' inside a character class is a literal pipe, and
        # real TLDs can be longer than 7 characters, so the TLD class is [A-Za-z]{2,})
        email_pattern = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b'
        potential_emails = re.findall(email_pattern, text)
        
        # Filter out non-human emails
        human_emails = [
            email.lower() for email in potential_emails
            if self.is_human_email(email)
        ]
        
        return list(set(human_emails))  # Remove duplicates

    def is_human_email(self, email):
        # Check if email contains unwanted characters or file extensions
        unwanted_chars = ['%', '+']
        file_extensions = ['.svg', '.jpg', '.png', '.jpeg', '.gif', '.pdf', '.doc', '.docx']
        
        if any(char in email for char in unwanted_chars):
            return False
        if any(email.lower().endswith(ext) for ext in file_extensions):
            return False
        
        # Additional checks can be added here
        return True

    def is_allowed_domain(self, url):
        if not self.allowed_domains:
            return True
        return any(domain in url for domain in self.allowed_domains)

class App(ctk.CTk):
    def __init__(self):
        super().__init__()

        # Configure window
        self.title("V-krapt Dashboard")
        self.geometry("900x600")

        # Set the appearance mode and color theme
        ctk.set_appearance_mode("dark")
        ctk.set_default_color_theme("blue")

        # Configure grid layout (2x1)
        self.grid_columnconfigure(1, weight=1)
        self.grid_rowconfigure(0, weight=1)

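        # Shared spider state: the crawl runs in a background thread and pushes
        # emails into this queue, which the Tk main loop drains periodically.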
        self.email_queue = Queue()
        self.crawler_process = None
        self.spider_running = False
        self.extracted_emails = set() 

        # Sidebar frame with buttons
        self.sidebar_frame = ctk.CTkFrame(self, width=200, corner_radius=0)
        self.sidebar_frame.grid(row=0, column=0, sticky="nsew")
        self.sidebar_frame.grid_rowconfigure(7, weight=1)  # Increased to accommodate new buttons

        self.logo_label = ctk.CTkLabel(self.sidebar_frame, text="V-krapt", font=ctk.CTkFont(size=20, weight="bold"))
        self.logo_label.grid(row=0, column=0, padx=20, pady=(20, 10))

        # Create sidebar buttons
        self.learn_button = self.create_button(self.sidebar_frame, "Learn", 1)
        self.contact_button = self.create_button(self.sidebar_frame, "Contact", 2)
        self.scrapper_button = self.create_button(self.sidebar_frame, "V-krapt Scrapper", 3)
        self.spider_button = self.create_button(self.sidebar_frame, "V-krapt Spider", 4)
        self.filter_el_button = self.create_button(self.sidebar_frame, "Filter EL", 5)  # New button
        self.db_ant_button = self.create_button(self.sidebar_frame, "DB-ANT", 6)  # New button
        self.logout_button = self.create_button(self.sidebar_frame, "Log Out", 7)

        # Create main frame
        self.main_frame = ctk.CTkFrame(self, fg_color="transparent")
        self.main_frame.grid(row=0, column=1, padx=(20, 20), pady=(20, 20), sticky="nsew")
        self.main_frame.grid_columnconfigure(0, weight=1)
        self.main_frame.grid_rowconfigure(0, weight=1)

        # Add a welcome label to the main frame
        self.info_label = ctk.CTkLabel(self.main_frame, text="Welcome to V-krapt Dashboard!\nClick a button to learn more.", 
                                       font=ctk.CTkFont(size=18), wraplength=400, justify="center")
        self.info_label.grid(row=0, column=0, padx=20, pady=20, sticky="nsew")

    def create_button(self, parent, text, row):
        button = ctk.CTkButton(parent, text=text, anchor="w", command=lambda: self.button_callback(text))
        button.grid(row=row, column=0, padx=20, pady=10, sticky="ew")
        return button

    def button_callback(self, button_text):
        # Clear the main frame
        for widget in self.main_frame.winfo_children():
            widget.destroy()

        if button_text == "Learn":
            self.show_learn_page()
        elif button_text == "Contact":
            self.show_contact_page()
        elif button_text == "V-krapt Scrapper":
            self.show_scrapper_page()
        elif button_text == "Filter EL":
            self.show_filter_el_page()
        elif button_text == "DB-ANT":
            self.show_db_ant_page()
        elif button_text == "V-krapt Spider":
            self.show_spider_page()
        elif button_text == "Log Out":
            self.logout()
        

    def show_learn_page(self):
        # Title
        title_label = ctk.CTkLabel(self.main_frame, text="Learn V-krapt", font=ctk.CTkFont(size=24, weight="bold"))
        title_label.pack(pady=(20, 10))

        # Description
        description = "V-krapt is a powerful suite of web scraping and crawling tools. Learn how to use them effectively to gather and analyze web data."
        desc_label = ctk.CTkLabel(self.main_frame, text=description, wraplength=400, justify="center")
        desc_label.pack(pady=10)

        # YouTube video thumbnail and button
        def open_youtube_video():
            webbrowser.open("https://youtu.be/AkiAydJ172Y")

        # Load an image to represent the YouTube thumbnail; fall back to a plain
        # text button if the thumbnail file is missing.
        try:
            thumbnail_image = Image.open("V-krapt.png")
            thumbnail_image = thumbnail_image.resize((560, 315))  # Resize the image to match the video size
            thumbnail_photo = ImageTk.PhotoImage(thumbnail_image)

            # Create a button with the image that opens the video link
            video_button = ctk.CTkButton(self.main_frame, image=thumbnail_photo, text="", command=open_youtube_video)
            video_button.image = thumbnail_photo  # Keep a reference to the image to avoid garbage collection
        except FileNotFoundError:
            video_button = ctk.CTkButton(self.main_frame, text="Watch the tutorial on YouTube", command=open_youtube_video)
        video_button.pack(pady=20)

    def show_contact_page(self):
        # Title
        title_label = ctk.CTkLabel(self.main_frame, text="Contact Us", font=ctk.CTkFont(size=24, weight="bold"))
        title_label.pack(pady=(20, 10))

        # Description
        description = "Have questions or need support? Our team is here to help. Reach out to us for any inquiries about V-krapt tools or services."
        desc_label = ctk.CTkLabel(self.main_frame, text=description, wraplength=400, justify="center")
        desc_label.pack(pady=10)

        # Contact information
        contact_info = "Email: vkrapt.team@gmail.com\nWhatsapp : +212 650568750 \nAddress: NO ADDRESS CHECK THE WEBSITE ::|::/:="
        contact_label = ctk.CTkLabel(self.main_frame, text=contact_info, justify="center")
        contact_label.pack(pady=20)

        # Website button
        website_button = ctk.CTkButton(self.main_frame, text="Visit Contact Page", command=lambda: self.open_website("https://vkrapt.pages.dev"))
        website_button.pack(pady=20)

    def open_website(self, url):
        webbrowser.open(url)

    def logout(self):
        self.quit()

    def show_scrapper_page(self):

        self.extracted_emails = set()  # Store unique emails
        # Title
        title_label = ctk.CTkLabel(self.main_frame, text="V-krapt Scrapper", font=ctk.CTkFont(size=24, weight="bold"))
        title_label.pack(pady=(20, 10))

        # URL input field
        self.url_entry = ctk.CTkEntry(self.main_frame, width=300, placeholder_text="Enter URL")
        self.url_entry.pack(pady=10)

        # File format selection
        self.file_format = ctk.StringVar(value=".txt")
        format_frame = ctk.CTkFrame(self.main_frame)
        format_frame.pack(pady=10)
        ctk.CTkRadioButton(format_frame, text=".txt", variable=self.file_format, value=".txt").pack(side="left", padx=10)
        ctk.CTkRadioButton(format_frame, text=".csv", variable=self.file_format, value=".csv").pack(side="left", padx=10)

        # Results table
        self.results_table = ctk.CTkTextbox(self.main_frame, width=500, height=200)
        self.results_table.pack(pady=20)

        # Action buttons
        button_frame = ctk.CTkFrame(self.main_frame)
        button_frame.pack(pady=10)
        ctk.CTkButton(button_frame, text="Extract Emails", command=self.extract_emails).pack(side="left", padx=10)
        ctk.CTkButton(button_frame, text="Extract Links", command=self.extract_links).pack(side="left", padx=10)
        ctk.CTkButton(button_frame, text="Download", command=self.download_results).pack(side="left", padx=10)

        

    def extract_emails(self):
        url = self.url_entry.get()
        if not url:
            self.results_table.insert("end", "Please enter a URL.\n")
            return

        try:
            # A timeout keeps the single-threaded GUI from hanging indefinitely
            response = requests.get(url, timeout=10)
            response.raise_for_status()
        except requests.RequestException as e:
            self.results_table.insert("end", f"Error fetching the URL: {str(e)}\n")
            return

        content_type = response.headers.get('Content-Type', '').lower()
        
        new_emails = set()
        new_emails.update(self.extract_from_html(response.text))
        new_emails.update(self.extract_from_js(response.text))
        
        if 'application/json' in content_type:
            new_emails.update(self.extract_from_json(response.text))
        elif 'application/xml' in content_type or 'text/xml' in content_type:
            new_emails.update(self.extract_from_xml(response.text))

        # Add new unique emails to the set
        new_unique_emails = new_emails - self.extracted_emails
        self.extracted_emails.update(new_unique_emails)

        # Display results
        if new_unique_emails:
            new_content = "\n".join(new_unique_emails)
            self.results_table.insert("end", f"\n{new_content}" if self.results_table.get("1.0", "end-1c") else new_content)
        else:
            self.results_table.insert("end", "No new emails found.\n")

    def extract_from_html(self, html_content):
        soup = BeautifulSoup(html_content, 'html.parser')
        text_content = soup.get_text()
        return self.find_emails(text_content)

    def extract_from_js(self, html_content):
        soup = BeautifulSoup(html_content, 'html.parser')
        scripts = soup.find_all('script')
        emails = set()
        for script in scripts:
            script_content = script.string if script.string else ''
            emails.update(self.find_emails(script_content))
        return emails

    def extract_from_json(self, json_content):
        try:
            json_data = json.loads(json_content)
            json_string = json.dumps(json_data)
            return self.find_emails(json_string)
        except json.JSONDecodeError:
            return set()

    def extract_from_xml(self, xml_content):
        return self.find_emails(xml_content)
    
    def find_emails(self, text):
        # Improved email regex pattern
        email_pattern = r'''(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])'''
        
        # Normalize Unicode characters
        text = unicodedata.normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
        
        # Find all email matches
        emails = re.findall(email_pattern, text, re.IGNORECASE)
        
        # Additional filtering to remove false positives
        filtered_emails = set()
        for email in emails:
            if self.is_valid_email(email):
                filtered_emails.add(email.lower())  # Convert to lowercase for uniqueness
        
        return filtered_emails
    
    def is_valid_email(self, email):
        # Additional validation checks
        if len(email) > 254:  # Maximum allowed length for an email
            return False
        try:
            local_part, domain = email.rsplit('@', 1)
        except ValueError:
            return False
        if len(local_part) > 64:  # Maximum length of local part
            return False
        if domain.endswith('.'):  # Domain can't end with a dot
            return False
        if not re.match(r'^[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.[a-zA-Z]{2,}$', domain):
            return False
        return True

    def extract_links(self):
        url = self.url_entry.get()
        if not url:
            self.results_table.insert("end", "Please enter a URL.\n")
            return

        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
        except requests.RequestException as e:
            self.results_table.insert("end", f"Error fetching the URL: {str(e)}\n")
            return

        soup = BeautifulSoup(response.text, 'html.parser')
        links = soup.find_all('a')

        self.results_table.delete("1.0", "end")  # Clear previous results
        self.results_table.insert("end", f"Links found on {url}:\n\n")

        listed_links = 0
        for link in links:
            href = link.get('href')
            if href:
                if href.startswith('/'):
                    full_url = urljoin(url, href)
                elif href.startswith('http'):
                    full_url = href
                else:
                    continue  # Skip non-http links like javascript: or mailto:

                link_text = link.text.strip() or "[No text]"
                self.results_table.insert("end", f"{link_text}: {full_url}\n")
                listed_links += 1

        # Count only the links actually listed, not every <a> tag on the page
        self.results_table.insert("end", f"\nTotal links found: {listed_links}\n")

    def download_results(self):
        content = "\n".join(self.extracted_emails)
        if not content:
            self.results_table.insert("end", "No results to download.\n")
            return

        file_format = self.file_format.get()
        file_types = [('Text File', '*.txt'), ('CSV File', '*.csv')] if file_format == '.txt' else [('CSV File', '*.csv'), ('Text File', '*.txt')]
        file_path = filedialog.asksaveasfilename(defaultextension=file_format, filetypes=file_types)

        if file_path:
            with open(file_path, 'w', newline='') as file:
                if file_format == '.txt':
                    file.write(content)
                else:
                    writer = csv.writer(file)
                    writer.writerow(['Email'])
                    for email in self.extracted_emails:
                        writer.writerow([email])
            print(f"Results downloaded to {file_path}")

    def show_filter_el_page(self):
        # Clear the main frame
        for widget in self.main_frame.winfo_children():
            widget.destroy()

        # Title
        title_label = ctk.CTkLabel(self.main_frame, text="Filter EL", font=ctk.CTkFont(size=24, weight="bold"))
        title_label.pack(pady=(20, 10))

        # File selection
        self.file_path = ctk.StringVar()
        file_entry = ctk.CTkEntry(self.main_frame, textvariable=self.file_path, width=300, state="readonly")
        file_entry.pack(pady=10)

        select_file_button = ctk.CTkButton(self.main_frame, text="Select File", command=self.select_file)
        select_file_button.pack(pady=10)

        # Domain extensions input
        self.domain_extensions = ctk.StringVar()
        domain_label = ctk.CTkLabel(self.main_frame, text="Enter allowed domain extensions (comma-separated, e.g., gmail.com, yahoo.com):")
        domain_label.pack(pady=(10, 5))
        domain_entry = ctk.CTkEntry(self.main_frame, textvariable=self.domain_extensions, width=300)
        domain_entry.pack(pady=(0, 10))

        # Filter button
        filter_button = ctk.CTkButton(self.main_frame, text="Filter Emails", command=self.filter_emails)
        filter_button.pack(pady=10)

        # Results display
        self.results_text = ctk.CTkTextbox(self.main_frame, width=500, height=200)
        self.results_text.pack(pady=20)

        # Download options
        download_frame = ctk.CTkFrame(self.main_frame)
        download_frame.pack(pady=10)

        self.download_format = ctk.StringVar(value=".txt")
        ctk.CTkRadioButton(download_frame, text=".txt", variable=self.download_format, value=".txt").pack(side="left", padx=10)
        ctk.CTkRadioButton(download_frame, text=".csv", variable=self.download_format, value=".csv").pack(side="left", padx=10)

        download_button = ctk.CTkButton(self.main_frame, text="Download Filtered Results", command=self.download_filtered_results)
        download_button.pack(pady=10)

   

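    # Spider page: drives the Scrapy-based crawler defined in EmailSpider above.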
    def show_spider_page(self):
        # Clear the main frame
        for widget in self.main_frame.winfo_children():
            widget.destroy()

        # Title
        title_label = ctk.CTkLabel(self.main_frame, text="V-krapt Spider", font=ctk.CTkFont(size=24, weight="bold"))
        title_label.pack(pady=(20, 10))

        # URL input field
        self.spider_url_entry = ctk.CTkEntry(self.main_frame, width=300, placeholder_text="Enter URL")
        self.spider_url_entry.pack(pady=10)

        # File format selection
        self.spider_file_format = ctk.StringVar(value=".txt")
        format_frame = ctk.CTkFrame(self.main_frame)
        format_frame.pack(pady=10)
        ctk.CTkRadioButton(format_frame, text=".txt", variable=self.spider_file_format, value=".txt").pack(side="left", padx=10)
        ctk.CTkRadioButton(format_frame, text=".csv", variable=self.spider_file_format, value=".csv").pack(side="left", padx=10)

        # Results table
        self.spider_results_table = ctk.CTkTextbox(self.main_frame, width=500, height=200)
        self.spider_results_table.pack(pady=20)

        # Add domain restriction input
        self.domain_entry = ctk.CTkEntry(self.main_frame, width=300, placeholder_text="Enter allowed domains (comma-separated)")
        self.domain_entry.pack(pady=10)

        # Action buttons
        button_frame = ctk.CTkFrame(self.main_frame)
        button_frame.pack(pady=10)
        self.start_button = ctk.CTkButton(button_frame, text="Start Scraping", command=self.start_spider_scraping)
        self.start_button.pack(side="left", padx=10)
        self.stop_button = ctk.CTkButton(button_frame, text="Stop Scraping", command=self.stop_spider_scraping, state="disabled")
        self.stop_button.pack(side="left", padx=10)
        ctk.CTkButton(button_frame, text="Download", command=self.download_spider_results).pack(side="left", padx=10)

    def start_spider_scraping(self):
        if self.spider_running:
            self.spider_results_table.insert("end", "Spider is already running.\n")
            return

        url = self.spider_url_entry.get()
        if not url:
            self.spider_results_table.insert("end", "Please enter a URL.\n")
            return

        allowed_domains = [domain.strip() for domain in self.domain_entry.get().split(',') if domain.strip()]

        self.spider_results_table.delete("1.0", "end")  # Clear previous results
        self.spider_results_table.insert("end", f"Starting spider on {url}\n")
        if allowed_domains:
            self.spider_results_table.insert("end", f"Allowed domains: {', '.join(allowed_domains)}\n")

        settings = get_project_settings()
        settings.set('ROBOTSTXT_OBEY', False)
        settings.set('CONCURRENT_REQUESTS', 10)  # Modest concurrency (Scrapy's default is 16)
        settings.set('DOWNLOAD_DELAY', 0.5)  # Small delay between requests to avoid hammering the site

        self.crawler_process = CrawlerProcess(settings)
        self.crawler_process.crawl(EmailSpider, start_url=url, email_queue=self.email_queue, allowed_domains=allowed_domains)

        self.spider_running = True
        self.start_button.configure(state="disabled")
        self.stop_button.configure(state="normal")
        self.after(100, self.check_email_queue)

        # Run the crawler in a separate thread so the Tk main loop stays responsive
        threading.Thread(target=self.run_spider, daemon=True).start()


    def run_spider(self):
        # start() blocks until the crawl finishes. Signal handlers can only be
        # installed from the main thread, so they are disabled here (the keyword
        # argument requires Scrapy >= 2.6). Note that Twisted's reactor cannot be
        # restarted, so the spider can only be run once per process.
        self.crawler_process.start(stop_after_crawl=True, install_signal_handlers=False)
        # after() is invoked from the worker thread; Tk tolerates this in practice
        # but it is not formally thread-safe.
        self.after(0, self.spider_finished)

    
    def spider_finished(self):
        self.spider_running = False
        self.start_button.configure(state="normal")
        self.stop_button.configure(state="disabled")
        self.spider_results_table.insert("end", "Spider finished crawling.\n")

    def stop_spider_scraping(self):
        if not self.spider_running:
            self.spider_results_table.insert("end", "Spider is not running.\n")
            return

        # Best-effort stop: stop() is called from the GUI thread while the reactor
        # runs in the worker thread, so shutdown may not be immediate.
        self.crawler_process.stop()
        self.spider_running = False
        self.start_button.configure(state="normal")
        self.stop_button.configure(state="disabled")
        self.spider_results_table.insert("end", "Spider stopped.\n")

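    # Polled every 100 ms from the Tk event loop; drains emails queued by the
    # crawler thread and appends new ones to the results box.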
    def check_email_queue(self):
        while not self.email_queue.empty():
            email = self.email_queue.get()
            if email not in self.extracted_emails:
                self.extracted_emails.add(email)
                self.update_results(email)
        
        if self.spider_running:
            self.after(100, self.check_email_queue)

    def update_results(self, email):
        self.spider_results_table.insert("end", f"Found email: {email}\n")
        self.spider_results_table.see("end") 

    def download_spider_results(self):
        if not self.extracted_emails:
            self.spider_results_table.insert("end", "No results to download.\n")
            return

        file_format = self.spider_file_format.get()
        file_types = [('Text File', '*.txt'), ('CSV File', '*.csv')] if file_format == '.txt' else [('CSV File', '*.csv'), ('Text File', '*.txt')]
        file_path = filedialog.asksaveasfilename(defaultextension=file_format, filetypes=file_types)

        if file_path:
            with open(file_path, 'w', newline='') as file:
                if file_format == '.txt':
                    for email in self.extracted_emails:
                        file.write(f"{email}\n")
                else:
                    writer = csv.writer(file)
                    writer.writerow(['Email'])
                    for email in self.extracted_emails:
                        writer.writerow([email])
            print(f"Results downloaded to {file_path}")

    # Filter EL page helpers

    def select_file(self):
        file_path = filedialog.askopenfilename(filetypes=[("Text files", "*.txt"), ("CSV files", "*.csv")])
        if file_path:
            self.file_path.set(file_path)

    def filter_emails(self):
        file_path = self.file_path.get()
        if not file_path:
            self.results_text.insert("end", "Please select a file first.\n")
            return

        allowed_extensions = [ext.strip().lower() for ext in self.domain_extensions.get().split(',') if ext.strip()]

        filtered_emails = []
        total_emails = 0

        try:
            if file_path.endswith('.txt'):
                with open(file_path, 'r') as file:
                    emails = file.read().splitlines()
            elif file_path.endswith('.csv'):
                with open(file_path, 'r', newline='') as file:
                    reader = csv.reader(file)
                    emails = [row[0] for row in reader if row]  # Assuming email is in the first column
            else:
                self.results_text.insert("end", "Unsupported file format.\n")
                return

            total_emails = len(emails)
            filtered_emails = [email for email in emails if self.is_human_email_ref(email, allowed_extensions)]

        except Exception as e:
            self.results_text.insert("end", f"Error reading file: {str(e)}\n")
            return

        self.filtered_emails = filtered_emails  # Store for downloading later
        self.results_text.delete("1.0", "end")
        self.results_text.insert("end", f"Total emails: {total_emails}\n")
        self.results_text.insert("end", f"Filtered emails: {len(filtered_emails)}\n")
        self.results_text.insert("end", "Filtered email list:\n")
        for email in filtered_emails:
            self.results_text.insert("end", f"{email}\n")

    def is_human_email_ref(self, email, allowed_extensions):
        # Check if email contains unwanted characters or file extensions
        unwanted_chars = ['%', '+']
        file_extensions = ['.svg', '.jpg', '.png', '.jpeg', '.gif', '.pdf', '.doc', '.docx']
        
        email = email.lower().strip()
        
        if any(char in email for char in unwanted_chars):
            return False
        if any(email.endswith(ext) for ext in file_extensions):
            return False
        
        # Basic email format check
        if not re.match(r'^[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,}$', email):
            return False
        
        # Check if the email domain is in the allowed extensions
        if allowed_extensions:
            domain = email.split('@')[-1]
            if domain not in allowed_extensions:
                return False
        
        return True

    def download_filtered_results(self):
        if not hasattr(self, 'filtered_emails'):
            self.results_text.insert("end", "Please filter emails first.\n")
            return

        file_format = self.download_format.get()
        file_types = [('Text File', '*.txt'), ('CSV File', '*.csv')] if file_format == '.txt' else [('CSV File', '*.csv'), ('Text File', '*.txt')]
        file_path = filedialog.asksaveasfilename(defaultextension=file_format, filetypes=file_types)

        if file_path:
            try:
                with open(file_path, 'w', newline='') as file:
                    if file_format == '.txt':
                        for email in self.filtered_emails:
                            file.write(f"{email}\n")
                    else:  # CSV format
                        writer = csv.writer(file)
                        writer.writerow(['Email'])  # Header
                        for email in self.filtered_emails:
                            writer.writerow([email])
                self.results_text.insert("end", f"Filtered results saved to {file_path}\n")
            except Exception as e:
                self.results_text.insert("end", f"Error saving file: {str(e)}\n")

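    # DB-ANT page: loads an email list from a .txt/.csv file into a MySQL table
    # using mysql-connector-python.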
    def show_db_ant_page(self):
        # Clear the main frame
        for widget in self.main_frame.winfo_children():
            widget.destroy()

        # Title
        title_label = ctk.CTkLabel(self.main_frame, text="DB-ANT: MySQL Database Operations", font=ctk.CTkFont(size=24, weight="bold"))
        title_label.pack(pady=(20, 10))

        # Database connection frame
        db_frame = ctk.CTkFrame(self.main_frame)
        db_frame.pack(pady=10, padx=10, fill="x")

        ctk.CTkLabel(db_frame, text="Host:").grid(row=0, column=0, padx=5, pady=5)
        self.host_entry = ctk.CTkEntry(db_frame)
        self.host_entry.grid(row=0, column=1, padx=5, pady=5)

        ctk.CTkLabel(db_frame, text="User:").grid(row=1, column=0, padx=5, pady=5)
        self.user_entry = ctk.CTkEntry(db_frame)
        self.user_entry.grid(row=1, column=1, padx=5, pady=5)

        ctk.CTkLabel(db_frame, text="Password:").grid(row=2, column=0, padx=5, pady=5)
        self.password_entry = ctk.CTkEntry(db_frame, show="*")
        self.password_entry.grid(row=2, column=1, padx=5, pady=5)

        ctk.CTkLabel(db_frame, text="Database:").grid(row=3, column=0, padx=5, pady=5)
        self.db_name_entry = ctk.CTkEntry(db_frame)
        self.db_name_entry.grid(row=3, column=1, padx=5, pady=5)

        # Table name input
        self.table_name_entry = ctk.CTkEntry(self.main_frame, width=300, placeholder_text="Enter Table Name")
        self.table_name_entry.pack(pady=10)

        # File selection
        self.file_path = ctk.StringVar()
        file_entry = ctk.CTkEntry(self.main_frame, textvariable=self.file_path, width=300, state="readonly")
        file_entry.pack(pady=10)

        select_file_button = ctk.CTkButton(self.main_frame, text="Select File", command=self.select_file_for_db)
        select_file_button.pack(pady=10)

        # Action buttons
        button_frame = ctk.CTkFrame(self.main_frame)
        button_frame.pack(pady=10)
        ctk.CTkButton(button_frame, text="Connect & Create Table", command=self.connect_and_create_table).pack(side="left", padx=10)
        ctk.CTkButton(button_frame, text="Insert Data", command=self.insert_data_to_db).pack(side="left", padx=10)

        # Results display
        self.db_results_text = ctk.CTkTextbox(self.main_frame, width=500, height=200)
        self.db_results_text.pack(pady=20)

    def select_file_for_db(self):
        file_types = [("Text files", "*.txt"), ("CSV files", "*.csv")]
        file_path = filedialog.askopenfilename(filetypes=file_types)
        if file_path:
            self.file_path.set(file_path)

    def connect_and_create_table(self):
        connection = None
        cursor = None
        try:
            table_name = self.table_name_entry.get()
            # Table names cannot be parameterized, so restrict them to a safe
            # identifier pattern before interpolating into the SQL string.
            if not re.match(r'^[A-Za-z0-9_]+$', table_name):
                self.db_results_text.insert("end", "Invalid table name. Use letters, digits and underscores only.\n")
                return

            connection = mysql.connector.connect(
                host=self.host_entry.get(),
                user=self.user_entry.get(),
                password=self.password_entry.get(),
                database=self.db_name_entry.get()
            )
            cursor = connection.cursor()

            # VARCHAR(254) covers the maximum email length and, unlike LONGTEXT,
            # can carry a UNIQUE index without a key-length error.
            create_table_query = f"""
            CREATE TABLE IF NOT EXISTS {table_name} (
                id INT AUTO_INCREMENT PRIMARY KEY,
                email VARCHAR(254) UNIQUE
            )
            """
            cursor.execute(create_table_query)
            connection.commit()

            self.db_results_text.insert("end", f"Successfully connected and created table '{table_name}'.\n")
        except mysql.connector.Error as err:
            self.db_results_text.insert("end", f"Error: {err}\n")
        finally:
            # Guard against the case where the connection was never established
            if connection is not None and connection.is_connected():
                if cursor is not None:
                    cursor.close()
                connection.close()

    def insert_data_to_db(self):
        file_path = self.file_path.get()
        if not file_path:
            self.db_results_text.insert("end", "Please select a file first.\n")
            return

        connection = None
        cursor = None
        try:
            table_name = self.table_name_entry.get()
            if not re.match(r'^[A-Za-z0-9_]+$', table_name):
                self.db_results_text.insert("end", "Invalid table name. Use letters, digits and underscores only.\n")
                return

            connection = mysql.connector.connect(
                host=self.host_entry.get(),
                user=self.user_entry.get(),
                password=self.password_entry.get(),
                database=self.db_name_entry.get()
            )
            cursor = connection.cursor()

            insert_query = f"INSERT IGNORE INTO {table_name} (email) VALUES (%s)"

            emails = []
            if file_path.endswith('.txt'):
                with open(file_path, 'r') as file:
                    emails = [line.strip() for line in file if line.strip()]
            elif file_path.endswith('.csv'):
                with open(file_path, 'r', newline='') as file:
                    reader = csv.reader(file)
                    emails = [row[0] for row in reader if row]  # Assuming email is in the first column
            else:
                self.db_results_text.insert("end", "Unsupported file format.\n")
                return

            # Insert all rows in one round trip; INSERT IGNORE skips duplicates
            cursor.executemany(insert_query, [(email,) for email in emails])

            connection.commit()
            self.db_results_text.insert("end", f"Processed {len(emails)} emails into '{table_name}' (duplicates ignored).\n")
        except mysql.connector.Error as err:
            self.db_results_text.insert("end", f"Error: {err}\n")
        finally:
            if connection is not None and connection.is_connected():
                if cursor is not None:
                    cursor.close()
                connection.close()


if __name__ == "__main__":
    app = App()
    app.mainloop()