import numpy as np
import spacy
from transformers import pipeline, BertTokenizer, BertForSequenceClassification
from scipy.spatial.distance import cosine
from nltk.corpus import stopwords, wordnet as wn
import tensorflow_hub as hub
import tensorflow as tf
from nltk.tokenize import word_tokenize
import nltk
from itertools import combinations, product
import logging
import os

# Load the fine-tuned BERT model and tokenizer from a local checkpoint.
model_path = 'fine_tuned_bert'
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForSequenceClassification.from_pretrained(model_path)
# Note: return_all_scores=True is deprecated in newer transformers releases,
# which use top_k=None for the same behavior; it still works with a warning.
classifier = pipeline('text-classification', model=model, tokenizer=tokenizer, return_all_scores=True)


def softmax(logits):
    # Numerically stable softmax: subtract the max before exponentiating.
    e_logits = np.exp(logits - np.max(logits))
    return e_logits / e_logits.sum()


sentence = "This pic is tough"

# Manual forward pass: get raw logits, convert to probabilities,
# and take the argmax as the predicted label.
original_inputs = tokenizer(sentence, return_tensors='pt')
original_logits = model(**original_inputs).logits.detach().numpy()
original_softmax = softmax(original_logits)
original_confidence = np.max(original_softmax)
original_prediction = np.argmax(original_softmax)

print("Original prediction:", original_prediction)
print("Original confidence:", original_confidence)
print("Original logits:", original_logits)
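
# A minimal sanity-check sketch (assumes the 'fine_tuned_bert' checkpoint
# loaded above exists locally): the classifier pipeline wraps the same model
# and tokenizer, so its per-label scores should match the softmax computed
# manually from the logits. With return_all_scores=True the pipeline returns
# a list of {label, score} dicts for each input string, e.g.
# [[{'label': 'LABEL_0', 'score': ...}, {'label': 'LABEL_1', 'score': ...}]].
pipeline_scores = classifier(sentence)
print("Pipeline scores:", pipeline_scores)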