#!/usr/bin/python

import pandas as pd
import numpy as np
from sklearn.metrics import precision_recall_curve

def parse_hmmer_results(filename, positive_ids):
    """Parse a two-column HMMER score table into a labelled DataFrame.

    Each data line is expected to hold a sequence identifier (optionally
    with ``|``-separated suffixes, which are discarded) followed by a
    numeric score. Header lines ("Sequence..."), separator lines
    ("--------"), blank lines, and malformed rows are silently skipped.

    Parameters
    ----------
    filename : str
        Path to the HMMER results file.
    positive_ids : set
        Sequence IDs belonging to the positive class.

    Returns
    -------
    pandas.DataFrame
        Columns: 'sequence_id', 'score', 'label' (1 = positive, 0 = negative).
    """
    records = []
    with open(filename, 'r') as handle:
        for raw in handle:
            # Skip blanks and table decoration (header / rule lines).
            if not raw.strip() or raw.startswith(('Sequence', '--------')):
                continue
            fields = raw.split()
            if len(fields) < 2:
                continue
            try:
                # Keep only the primary accession before any '|' suffix.
                seq_id = fields[0].split('|')[0]
                bit_score = float(fields[1])
            except (ValueError, IndexError):
                # Non-numeric score or otherwise malformed row: ignore it.
                continue
            records.append({
                'sequence_id': seq_id,
                'score': bit_score,
                'label': int(seq_id in positive_ids),
            })
    return pd.DataFrame(records)

# ---------------------------------------------------------------------------
# Load the positive-class sequence IDs (one per line, optionally FASTA-style
# with a leading '>') and parse the HMMER score table against them.
# ---------------------------------------------------------------------------
with open('subfamily_idd.txt', 'r') as f:
    positive_ids = [line.strip().strip('>') for line in f if line.strip()]

df = parse_hmmer_results('pr11_short_column.txt', set(positive_ids))

print(f"Всего последовательностей: {len(df)}")
print(f"Positives: {df['label'].sum()}")
print(f"Negatives: {len(df) - df['label'].sum()}")

scores = df['score'].values
true_labels = df['label'].values

# sklearn returns precision/recall arrays one element LONGER than thresholds
# (the last point is the (1, 0) sentinel with no threshold). Dropping the
# final element aligns precision/recall index-for-index with thresholds,
# so argmax over f1_scores indexes thresholds directly.
precision, recall, thresholds = precision_recall_curve(true_labels, scores)
f1_scores = [2 * (p * r) / (p + r) if (p + r) > 0 else 0
             for p, r in zip(precision[:-1], recall[:-1])]

# Pick the threshold maximising F1.
optimal_idx = np.argmax(f1_scores)
optimal_threshold = thresholds[optimal_idx]
optimal_f1 = f1_scores[optimal_idx]
optimal_precision = precision[optimal_idx]
optimal_recall = recall[optimal_idx]

# Split sequences at the chosen threshold and derive the confusion matrix.
above_threshold = df[df['score'] >= optimal_threshold]
below_threshold = df[df['score'] < optimal_threshold]

tp = above_threshold['label'].sum()
fp = len(above_threshold) - tp
fn = df['label'].sum() - tp
tn = len(below_threshold[below_threshold['label'] == 0])

print(f"Оптимальный порог T: {optimal_threshold:.1f}")
print(f"F1-score: {optimal_f1:.3f}")
print(f"Precision (Точность): {optimal_precision:.3f}")
print(f"Recall (Полнота): {optimal_recall:.3f}")

print(f"\nTrue Positives (TP):  {tp:4d}")
print(f"False Positives (FP): {fp:4d}")
print(f"True Negatives (TN):  {tn:4d}")
print(f"False Negatives (FN): {fn:4d}")

# FIX: guard the percentage computations — either side of the split can be
# empty (e.g. the optimal threshold lies at an extreme of the score range),
# and the unguarded division previously raised ZeroDivisionError.
n_above = len(above_threshold)
n_below = len(below_threshold)
tp_pct = tp / n_above * 100 if n_above else 0.0
tn_pct = tn / n_below * 100 if n_below else 0.0

print(f"\nСтатистика для порога T = {optimal_threshold:.1f}:")
print(f"Последовательностей выше порога: {n_above}")
print(f"Из них истинно положительных: {tp} ({tp_pct:.1f}%)")
print(f"Последовательностей ниже порога: {n_below}")
print(f"Из них истинно отрицательных: {tn} ({tn_pct:.1f}%)")

# Score-distribution summary per class. NOTE(review): if either class is
# absent from the parsed table, pandas min/max/mean yield NaN here rather
# than raising — acceptable for a diagnostic printout.
positive_scores = df[df['label'] == 1]['score']
negative_scores = df[df['label'] == 0]['score']

print(f"\nРаспределение scores:")
print(f"Положительные: {positive_scores.min():.1f}-{positive_scores.max():.1f}")
print(f"Отрицательные: {negative_scores.min():.1f}-{negative_scores.max():.1f}")
print(f"Средний score положительных: {positive_scores.mean():.1f}")
print(f"Средний score отрицательных: {negative_scores.mean():.1f}")
