#!/usr/bin/env python3
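"""Convert a tabular file (csv/tsv/xlsx/bed) into a SQLite database.

Example invocation (the file name tab2sqlite.py is assumed; adjust to
wherever you save this script):

    python tab2sqlite.py -i regions.bed -o regions.sqlite -t regions -d score -ox csv
"""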
import argparse
import pandas as pd
import sqlite3
import os
import sys

def main():
    parser = argparse.ArgumentParser(description="Convert tabular file (csv/tsv/xlsx/bed) into SQLite DB.")
    parser.add_argument("-i", "--input", required=True, help="Input file (csv/tsv/xlsx/bed)")
    parser.add_argument("-o", "--output", required=True, help="Output SQLite file (e.g., db.sqlite)")
    parser.add_argument("-t", "--table", required=True, help="Table name for SQLite")
    parser.add_argument("-d", "--drop", nargs="+", default=None, help="Column(s) to drop (optional)")
    parser.add_argument("-ox", "--other", choices=["csv","tsv","xlsx"], help="Export table to other format too")

    args = parser.parse_args()

    # Detect file type
    ext = os.path.splitext(args.input)[-1].lower()

    if ext in [".csv"]:
        df = pd.read_csv(args.input)
    elif ext in [".tsv", ".bed"]:
        df = pd.read_csv(args.input, sep="\t")
    elif ext in [".xlsx", ".xls"]:
        df = pd.read_excel(args.input)
    else:
        sys.exit(f"⌠ Unsupported file format: {ext}")

    # 🔒 Fix duplicate column names - robust approach
    print(f"Original columns: {len(df.columns)}")
    
    # First, let's see what duplicates we have
    original_columns = df.columns.tolist()
    duplicates_found = []
    seen = set()
    
    for col in original_columns:
        if col in seen:
            duplicates_found.append(col)
        else:
            seen.add(col)
    
    if duplicates_found:
        print(f"⚠️ Found duplicate columns: {set(duplicates_found)}")
    
    # Deduplicate via pandas' dedup_names helper, which renames repeats the
    # same way read_csv does. Note: pd.io.common is internal API and may move
    # between pandas versions.
    df.columns = pd.io.common.dedup_names(df.columns, is_potential_multiindex=False)
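    # e.g. columns ["id", "score", "score"] come back as ["id", "score", "score.1"]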
    
    print(f"After deduplication: {len(df.columns)} columns")
    
    # Double-check for any remaining duplicates
    final_duplicates = [col for col in df.columns if df.columns.tolist().count(col) > 1]
    if final_duplicates:
        print(f"⚠️ WARNING: Still have duplicates: {set(final_duplicates)}")
        # Last resort: suffix every occurrence after the first with _dup_<n>
        columns = df.columns.tolist()
        seen_counts = {}
        for idx, col in enumerate(columns):
            n = seen_counts.get(col, 0)
            if n > 0:
                columns[idx] = f"{col}_dup_{n}"
            seen_counts[col] = n + 1
        df.columns = columns

    # Drop columns if requested (names not present are silently ignored)
    if args.drop:
        df.drop(columns=args.drop, inplace=True, errors="ignore")

    # Print head
    print("\n📊 Data Preview (head):")
    print(df.head())
    print(f"\n[rows: {df.shape[0]}, columns: {df.shape[1]}]")

    # Write to SQLite
    conn = sqlite3.connect(args.output)
    df.to_sql(args.table, conn, if_exists="replace", index=False)
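    # if_exists="replace" drops any pre-existing table of the same name
    # (and its indexes) before writing the fresh copy.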

    # Auto-create index for genomic range queries
    cur = conn.cursor()
    cols = {c.lower() for c in df.columns}
    if {"chr", "start", "end"}.issubset(cols):
        try:
            # SQLite identifiers are case-insensitive, so chr/start/end match
            # even if the source headers were Chr/Start/End. Quoting the table
            # name guards against names with spaces or hyphens.
            cur.execute(
                f'CREATE INDEX IF NOT EXISTS "idx_{args.table}_genomic" '
                f'ON "{args.table}"(chr, start, end);'
            )
            print("✅ Index created on (chr, start, end)")
        except Exception as e:
            print(f"⚠️ Could not create index: {e}")
    conn.commit()
    conn.close()
    print(f"\n✅ SQLite database created: {args.output} (table: {args.table})")

    # Optional export to another format (written to the current directory,
    # named after the table)
    if args.other:
        out_file = f"{args.table}.{args.other}"
        if args.other == "csv":
            df.to_csv(out_file, index=False)
        elif args.other == "tsv":
            df.to_csv(out_file, sep="\t", index=False)
        elif args.other == "xlsx":
            df.to_excel(out_file, index=False)
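            # to_excel needs an Excel writer engine installed (e.g. openpyxl)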
        print(f"📂 Extra export written: {out_file}")

if __name__ == "__main__":
    main()