
Commit 6fa42ec

Merge pull request #1 from anuragjain-git/anurag
Anurag
2 parents c2a39dc + a0ee383 commit 6fa42ec

File tree

8 files changed: +288 -41 lines

__pycache__/model.cpython-311.pyc

6.66 KB
Binary file not shown.

model.py (+14, -41)
@@ -10,8 +10,9 @@
 from keras.preprocessing.text import Tokenizer
 from keras.preprocessing.sequence import pad_sequences
 from sklearn.preprocessing import LabelEncoder
-from sklearn.feature_extraction.text import CountVectorizer
-from sklearn.metrics.pairwise import cosine_similarity
+
+import pickle
+
 
 nltk.download('stopwords')
 nltk.download('punkt')
@@ -37,23 +38,6 @@ def preprocess_text_list(text_list):
     preprocessed_texts = [preprocess_text(text) for text in text_list]
     return preprocessed_texts
 
-def check_relevance(new_text, dataset_texts, similarity_threshold=0.5):
-    # Preprocess the new text
-    preprocessed_new_text = preprocess_text(new_text)
-
-    # Preprocess each text in the dataset
-    preprocessed_dataset_texts = [preprocess_text(text) for text in dataset_texts]
-
-    # Calculate similarity between the new text and each text in the dataset
-    vectorizer = CountVectorizer().fit_transform([preprocessed_new_text] + preprocessed_dataset_texts)
-    similarity_matrix = cosine_similarity(vectorizer)
-
-    # Get the similarity scores
-    similarity_scores = similarity_matrix[0][1:]
-
-    # Check if any text in the dataset is similar to the new text
-    return any(score >= similarity_threshold for score in similarity_scores)
-
 texts = [
     "Debit INR 500.00 A/c no. XX8926 12-10-23 20:02:19 UPI/P2A/328546155288/ANURAG JAIN SMS BLOCKUPI Cust ID to 01351860002, if not you - Axis Bank",
     "Debit INR 109.00 A/c no. XX8926 27-01-24 11:36:57 UPI/P2M/6321837696198/Add Money to Wallet SMS BLOCKUPI Cust ID to 919951860002, if not you - Axis Bank",
@@ -87,6 +71,10 @@ def check_relevance(new_text, dataset_texts, similarity_threshold=0.5):
 tokenizer = Tokenizer(oov_token='<OOV>')
 tokenizer.fit_on_texts(texts)
 
+# Save the tokenizer to a file
+with open('tokenizer.pkl', 'wb') as token_file:
+    pickle.dump(tokenizer, token_file)
+
 # Convert the text data to sequences of integers using the tokenizer
 sequences = tokenizer.texts_to_sequences(texts)
 # Pad the sequences to ensure uniform length for neural network input
@@ -116,29 +104,14 @@ def check_relevance(new_text, dataset_texts, similarity_threshold=0.5):
 
 # Convert labels to NumPy array
 labels_np = np.array(encoded_labels)
+# Replace the lambda function with a named function
+def custom_sparse_softmax_cross_entropy(labels, logits):
+    return tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits)
 
-# Compile the model with the updated loss function
-model.compile(optimizer='adam', loss=lambda labels, logits: tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits), metrics=['accuracy'])
+# Compile the model with the named function
+model.compile(optimizer='adam', loss=custom_sparse_softmax_cross_entropy, metrics=['accuracy'])
 
 # Train the model
 model.fit(padded_sequences, labels_np, epochs=100)
-
-# Assuming 'new_texts' is a list of new messages
-new_texts = ["debit text here asjkxbsa axbjsa xjasbx xasgxya yxyagsvxtyasf 61t72t7172 ","credit INR refund 100","Refund Processed: Refund of Rs. 237.0 for favoru Household wrap ... is successfully transferred and will be credited to your account by Oct 04, 2023.", "UPI mandate has been successfully created towards TATA TECHNOLOGIES LI for INR 15000.00. Funds blocked from A/c no. XX8926. 12e5d61d2ac145738241fbf117bb295c@okaxis - Axis Bank"]
-
-# Check relevance and print the result
-for text in new_texts:
-    new_sequences = tokenizer.texts_to_sequences([text])
-    new_padded_sequences = pad_sequences(new_sequences, padding='post')
-
-    # Predictions
-    predictions = model.predict(new_padded_sequences)
-    predicted_labels = [label for label in predictions.argmax(axis=1)]
-
-    # Inverse transform predicted labels to original class labels
-    predicted_class_labels = label_encoder.inverse_transform(predicted_labels)
-
-    # Check relevance and print the result
-    is_relevant = check_relevance(text, texts)
-    relevance_status = "Relevant" if is_relevant else "Irrelevant"
-    print(f"Text: {text} | Predicted Label: {predicted_class_labels[0]} | Relevance: {relevance_status}")
+# Save the model in the recommended Keras format
+model.save('trained_model.keras')
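
Why the loss change matters here: when Keras saves a compiled model, the loss is recorded by name, and a lambda has no importable name, so a model compiled with the old lambda loss cannot be reliably restored from trained_model.keras. With the named function, reloading only requires passing it back under the same key via custom_objects. A minimal round-trip sketch using the two artifacts this file now writes (tokenizer.pkl and trained_model.keras); runmodel.py below does exactly this:

import pickle
import tensorflow as tf
from keras.models import load_model

# Same definition and name as in model.py; the custom_objects key must match.
def custom_sparse_softmax_cross_entropy(labels, logits):
    return tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits)

model = load_model('trained_model.keras',
                   custom_objects={'custom_sparse_softmax_cross_entropy': custom_sparse_softmax_cross_entropy})
with open('tokenizer.pkl', 'rb') as f:
    tokenizer = pickle.load(f)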

runmodel.py (+62)

@@ -0,0 +1,62 @@
+import tensorflow as tf
+from keras.models import load_model
+from model import preprocess_text  # Import your preprocessing function
+import pickle
+from sklearn.feature_extraction.text import CountVectorizer
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.metrics.pairwise import cosine_similarity
+from keras.preprocessing.sequence import pad_sequences
+from sklearn.preprocessing import LabelEncoder
+import numpy as np
+import pandas as pd
+
+
+# Define the custom loss function
+def custom_sparse_softmax_cross_entropy(labels, logits):
+    return tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits)
+
+# Load the trained model using the custom loss function
+loaded_model = load_model('trained_model.keras', custom_objects={'custom_sparse_softmax_cross_entropy': custom_sparse_softmax_cross_entropy})
+
+df = pd.read_csv('processed_dataset.csv')
+
+# Load the processed_texts list
+with open('processed_texts.pkl', 'rb') as f:
+    processed_texts = pickle.load(f)
+
+# Load the tokenizer
+with open('tokenizer.pkl', 'rb') as token_file:
+    tokenizer = pickle.load(token_file)
+
+label_encoder = LabelEncoder()
+
+# Assuming 'new_texts' is a list of new messages
+new_texts = ["UPI Bank account is credited with RS.25.00 on 25-Aug-2023","credit INR refund 100","Refund Processed: Refund of Rs. 237.0 for favoru Household wrap ... is successfully transferred and will be credited to your account by Oct 04, 2023.", "UPI mandate has been successfully created towards TATA TECHNOLOGIES LI for INR 15000.00. Funds blocked from A/c no. XX8926. 12e5d61d2ac145738241fbf117bb295c@okaxis - Axis Bank","Dear Player, Rs.10,000* is credited to your RummyTime a/c Ref Id: RT210XX Download the app & make your 1st deposit now - http://gmg.im/bKSfALT&C Apply"]
+
+similarity_threshold = 0.7
+
+for text in new_texts:
+    # Preprocess the new text (preprocess_text is the NLTK pipeline from model.py)
+    preprocessed_new_text = preprocess_text(text)
+
+    # Calculate similarity between the new text and each text in the dataset using TF-IDF
+    vectorizer = TfidfVectorizer()
+    tfidf_matrix = vectorizer.fit_transform([preprocessed_new_text] + processed_texts)
+    similarity_scores = cosine_similarity(tfidf_matrix)[0][1:]
+
+    # Predictions
+    new_sequences = tokenizer.texts_to_sequences([preprocessed_new_text])
+    new_padded_sequences = pad_sequences(new_sequences, padding='post')
+    predictions = loaded_model.predict(new_padded_sequences)
+    predicted_labels = [label for label in predictions.argmax(axis=1)]
+
+    # Inverse transform predicted labels to original class labels
+    # Ensure that you have fitted the LabelEncoder before transforming
+    label_encoder.fit(df['label'])
+    predicted_class_labels = label_encoder.inverse_transform(predicted_labels)
+
+    # Check relevance and print the result
+    is_relevant = any(score >= similarity_threshold for score in similarity_scores)
+    relevance_status = "Relevant" if is_relevant else "Irrelevant"
+    print(f"Text: {text} | Predicted Label: {predicted_class_labels[0]} | Relevance: {relevance_status}")
+
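
Two observations on the loop above. First, because model.py has no __main__ guard, `from model import preprocess_text` executes the whole training script (including model.fit) at import time. Second, the TfidfVectorizer is refit and label_encoder.fit(df['label']) rerun on every message, even though both depend only on the dataset. A sketch of hoisting that loop-invariant work out, using the same names as runmodel.py (scores shift slightly, since the IDF statistics then come from the dataset alone rather than dataset-plus-query):

# Fit once, before the loop (sketch; variables as defined in runmodel.py above).
vectorizer = TfidfVectorizer()
dataset_matrix = vectorizer.fit_transform(processed_texts)
label_encoder.fit(df['label'])

for text in new_texts:
    preprocessed_new_text = preprocess_text(text)
    # transform() reuses the fitted vocabulary and IDF weights instead of refitting
    query_vector = vectorizer.transform([preprocessed_new_text])
    similarity_scores = cosine_similarity(query_vector, dataset_matrix)[0]
    is_relevant = any(score >= similarity_threshold for score in similarity_scores)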

testmodels/testing1.py (+101)

@@ -0,0 +1,101 @@
+import tensorflow as tf
+from keras.models import load_model
+from keras.preprocessing.text import Tokenizer
+from keras.preprocessing.sequence import pad_sequences
+from sklearn.preprocessing import LabelEncoder
+from sklearn.feature_extraction.text import CountVectorizer
+from sklearn.metrics.pairwise import cosine_similarity
+from nltk.tokenize import word_tokenize
+from nltk.corpus import stopwords
+from nltk.stem import PorterStemmer
+import numpy as np
+
+def preprocess_text(text):
+    # Remove punctuation and convert to lowercase
+    text = ''.join([char.lower() for char in text if char.isalnum() or char.isspace()])
+
+    # Tokenization
+    tokens = word_tokenize(text)
+
+    # Remove stopwords
+    stop_words = set(stopwords.words('english'))
+    tokens = [word for word in tokens if word not in stop_words]
+
+    # Stemming
+    stemmer = PorterStemmer()
+    tokens = [stemmer.stem(word) for word in tokens]
+
+    return ' '.join(tokens)
+
+def check_relevance(new_text, dataset_texts, similarity_threshold=0.5):
+    # Preprocess the new text
+    preprocessed_new_text = preprocess_text(new_text)
+
+    # Preprocess each text in the dataset
+    preprocessed_dataset_texts = [preprocess_text(text) for text in dataset_texts]
+
+    # Calculate similarity between the new text and each text in the dataset
+    vectorizer = CountVectorizer().fit_transform([preprocessed_new_text] + preprocessed_dataset_texts)
+    similarity_matrix = cosine_similarity(vectorizer)
+
+    # Get the similarity scores
+    similarity_scores = similarity_matrix[0][1:]
+
+    # Check if any text in the dataset is similar to the new text
+    return any(score >= similarity_threshold for score in similarity_scores)
+
+# Load the label encoder
+label_encoder = LabelEncoder()
+label_encoder.classes_ = np.load('label_encoder_classes.npy', allow_pickle=True)
+
+# Load the tokenizer
+tokenizer = Tokenizer(oov_token='<OOV>')
+tokenizer.word_index = np.load('tokenizer_word_index.npy', allow_pickle=True).item()
+
+# Redefine custom loss function with the same structure as during training
+def custom_loss(labels, logits):
+    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits)
+
+# Load the model with the correct custom loss function
+loaded_model = load_model('trained_model.h5', custom_objects={'custom_loss': custom_loss})
+
+texts = [
+    "Debit INR 500.00 A/c no. XX8926 12-10-23 20:02:19 UPI/P2A/328546155288/ANURAG JAIN SMS BLOCKUPI Cust ID to 01351860002, if not you - Axis Bank",
+    "Debit INR 109.00 A/c no. XX8926 27-01-24 11:36:57 UPI/P2M/6321837696198/Add Money to Wallet SMS BLOCKUPI Cust ID to 919951860002, if not you - Axis Bank",
+    "INR 5590.00 credited to A/c no. XX8926 on 09-11-23 at 11:59:28 IST. Info- UPI/P2A/334365332111/ANURAG JAIN/Axis Bank - Axis Bank",
+    "INR 216.35 credited to A/c no. XX8926 on 06-01-24 at 07:32:16 IST. Info- NEFT/CMS333334641/NEXTBIL. Avl Bal- INR 33478.22 - Axis Bank",
+    "Your JPB A/c xxxx0956 is credited with Rs.25.00 on 25-Aug-2023. Your current account balance is Rs.25.",
+    "IRCTC CF has requested money on Google Pay UPI app. On approving, INR 1033.60 will be debited from your A/c - Axis Bank",
+    "You have received UPI mandate collect request from TATA TECHNOLOGIES LI for INR 15000.00. Log into Google Pay app to authorize - Axis Bank",
+    "SOURAV CHANDRA DEY has requested money from you on Google Pay. On approving the request, INR 31.00 will be debited from your A/c - Axis Bank",
+    "Flipkart Refund Processed: Refund of Rs. 237.0 for favoru Household wrap ... is successfully transferred and will be credited to your account by Oct 04, 2023.",
+    "UPI mandate has been successfully created towards TATA TECHNOLOGIES LI for INR 15000.00. Funds blocked from A/c no. XX8926. 12e5d61d2ac145738241fbf117bb295c@okaxis - Axis Bank"
+]
+
+# Assuming 'new_texts' is a list of new messages
+new_texts = ["debit text here asjkxbsa axbjsa xjasbx xasgxya yxyagsvxtyasf 61t72t7172 ",
+             "credit INR refund 100",
+             "Refund Processed: Refund of Rs. 237.0 for favoru Household wrap ... is successfully transferred and will be credited to your account by Oct 04, 2023.",
+             "UPI mandate has been successfully created towards TATA TECHNOLOGIES LI for INR 15000.00. Funds blocked from A/c no. XX8926. 12e5d61d2ac145738241fbf117bb295c@okaxis - Axis Bank",
+             "Dear Player, Rs.10,000* is credited to your RummyTime a/c Ref Id: RT210XX Download the app & make your 1st deposit now - http://gmg.im/bKSfALT&C Apply"]
+
+# Check relevance and print the result for new texts
+for text in new_texts:
+    # Preprocess the new text
+    preprocessed_new_text = preprocess_text(text)
+
+    # Tokenize and pad the new text
+    new_sequences = tokenizer.texts_to_sequences([preprocessed_new_text])
+    new_padded_sequences = pad_sequences(new_sequences, padding='post')
+
+    # Predictions
+    predictions = loaded_model.predict(new_padded_sequences)
+    predicted_labels = [label for label in predictions.argmax(axis=1)]
+
+    # Inverse transform predicted labels to original class labels
+    predicted_class_labels = label_encoder.inverse_transform(predicted_labels)
+
+    # Check relevance and print the result
+    is_relevant = check_relevance(preprocessed_new_text, texts)
+    relevance_status = "Relevant" if is_relevant else "Irrelevant"
+    print(f"Text: {text} | Predicted Label: {predicted_class_labels[0]} | Relevance: {relevance_status}")
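
Note the artifact mismatch: this test script loads trained_model.h5 with a loss registered as custom_loss (built on sparse_categorical_crossentropy), while model.py in this commit saves trained_model.keras with custom_sparse_softmax_cross_entropy. load_model resolves custom objects by name, so the file name and the custom_objects key must match whatever the training run actually saved. The script also expects label_encoder_classes.npy and tokenizer_word_index.npy, which nothing in this commit writes. A sketch of the training-side counterpart that would produce them, assuming the fitted label_encoder and tokenizer from model.py:

import numpy as np

# Hypothetical companion step for model.py: persist the encoder classes and the
# tokenizer vocabulary in the .npy form testing1.py loads. np.save pickles the
# dict as a 0-d object array, hence allow_pickle=True and .item() on load.
np.save('label_encoder_classes.npy', label_encoder.classes_)
np.save('tokenizer_word_index.npy', tokenizer.word_index)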

tokenizer.pkl

2.68 KB
Binary file not shown.

tokenizer_config.json (+1)

@@ -0,0 +1 @@
+{"num_words": null, "filters": "!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n", "lower": true, "split": " ", "char_level": false, "oov_token": "<OOV>", "document_count": 10, "word_counts": "{\"debit\": 4, \"inr\": 9, \"50000\": 1, \"ac\": 8, \"xx8926\": 5, \"121023\": 1, \"200219\": 1, \"upip2a328546155288anurag\": 1, \"jain\": 1, \"sm\": 2, \"blockupi\": 2, \"cust\": 2, \"id\": 2, \"01351860002\": 1, \"axi\": 8, \"bank\": 9, \"10900\": 1, \"270124\": 1, \"113657\": 1, \"upip2m6321837696198add\": 1, \"money\": 3, \"wallet\": 1, \"919951860002\": 1, \"559000\": 1, \"credit\": 4, \"091123\": 1, \"115928\": 1, \"ist\": 2, \"info\": 2, \"upip2a334365332111anurag\": 1, \"jainaxi\": 1, \"21635\": 1, \"060124\": 1, \"073216\": 1, \"neftcms333334641nextbil\": 1, \"avl\": 1, \"bal\": 1, \"3347822\": 1, \"jpb\": 1, \"xxxx0956\": 1, \"rs2500\": 1, \"25aug2023\": 1, \"current\": 1, \"account\": 2, \"balanc\": 1, \"rs25\": 1, \"irctc\": 1, \"cf\": 1, \"request\": 4, \"googl\": 3, \"pay\": 3, \"upi\": 3, \"app\": 2, \"approv\": 2, \"103360\": 1, \"receiv\": 1, \"mandat\": 2, \"collect\": 1, \"tata\": 2, \"technolog\": 2, \"li\": 2, \"1500000\": 2, \"log\": 1, \"author\": 1, \"sourav\": 1, \"chandra\": 1, \"dey\": 1, \"3100\": 1, \"flipkart\": 1, \"refund\": 2, \"process\": 1, \"rs\": 1, \"2370\": 1, \"favoru\": 1, \"household\": 1, \"wrap\": 1, \"success\": 2, \"transfer\": 1, \"oct\": 1, \"04\": 1, \"2023\": 1, \"creat\": 1, \"toward\": 1, \"fund\": 1, \"block\": 1, \"12e5d61d2ac145738241fbf117bb295cokaxi\": 1}", "word_docs": "{\"inr\": 8, \"200219\": 1, \"ac\": 8, \"blockupi\": 2, \"50000\": 1, \"id\": 2, \"jain\": 1, \"upip2a328546155288anurag\": 1, \"01351860002\": 1, \"axi\": 8, \"xx8926\": 5, \"cust\": 2, \"sm\": 2, \"debit\": 4, \"121023\": 1, \"bank\": 8, \"113657\": 1, \"money\": 3, \"270124\": 1, \"919951860002\": 1, \"10900\": 1, \"wallet\": 1, \"upip2m6321837696198add\": 1, \"info\": 2, \"091123\": 1, \"jainaxi\": 1, \"upip2a334365332111anurag\": 1, \"559000\": 1, \"115928\": 1, \"ist\": 2, \"credit\": 4, \"neftcms333334641nextbil\": 1, \"21635\": 1, \"060124\": 1, \"avl\": 1, \"073216\": 1, \"bal\": 1, \"3347822\": 1, \"balanc\": 1, \"account\": 2, \"25aug2023\": 1, \"current\": 1, \"rs2500\": 1, \"jpb\": 1, \"rs25\": 1, \"xxxx0956\": 1, \"approv\": 2, \"103360\": 1, \"pay\": 3, \"googl\": 3, \"upi\": 3, \"app\": 2, \"request\": 3, \"cf\": 1, \"irctc\": 1, \"collect\": 1, \"log\": 1, \"tata\": 2, \"1500000\": 2, \"mandat\": 2, \"technolog\": 2, \"receiv\": 1, \"li\": 2, \"author\": 1, \"dey\": 1, \"3100\": 1, \"chandra\": 1, \"sourav\": 1, \"transfer\": 1, \"refund\": 1, \"flipkart\": 1, \"process\": 1, \"oct\": 1, \"wrap\": 1, \"household\": 1, \"2023\": 1, \"success\": 2, \"2370\": 1, \"rs\": 1, \"favoru\": 1, \"04\": 1, \"fund\": 1, \"creat\": 1, \"toward\": 1, \"block\": 1, \"12e5d61d2ac145738241fbf117bb295cokaxi\": 1}", "index_docs": "{\"2\": 8, \"32\": 1, \"4\": 8, \"15\": 2, \"30\": 1, \"17\": 2, \"34\": 1, \"33\": 1, \"35\": 1, \"5\": 8, \"6\": 5, \"16\": 2, \"14\": 2, \"7\": 4, \"31\": 1, \"3\": 8, \"38\": 1, \"10\": 3, \"37\": 1, \"41\": 1, \"36\": 1, \"40\": 1, \"39\": 1, \"19\": 2, \"43\": 1, \"46\": 1, \"45\": 1, \"42\": 1, \"44\": 1, \"18\": 2, \"8\": 4, \"50\": 1, \"47\": 1, \"48\": 1, \"51\": 1, \"49\": 1, \"52\": 1, \"53\": 1, \"59\": 1, \"20\": 2, \"57\": 1, \"58\": 1, \"56\": 1, \"54\": 1, \"60\": 1, \"55\": 1, \"22\": 2, \"63\": 1, \"12\": 3, \"11\": 3, \"13\": 3, \"21\": 2, \"9\": 3, \"62\": 1, \"61\": 1, \"65\": 1, \"66\": 1, \"24\": 2, \"27\": 2, \"23\": 2, \"25\": 2, \"64\": 1, \"26\": 2, \"67\": 1, \"70\": 1, \"71\": 1, \"69\": 1, \"68\": 1, \"79\": 1, \"28\": 1, \"72\": 1, \"73\": 1, \"80\": 1, \"78\": 1, \"77\": 1, \"82\": 1, \"29\": 2, \"75\": 1, \"74\": 1, \"76\": 1, \"81\": 1, \"85\": 1, \"83\": 1, \"84\": 1, \"86\": 1, \"87\": 1}", "index_word": "{\"1\": \"<OOV>\", \"2\": \"inr\", \"3\": \"bank\", \"4\": \"ac\", \"5\": \"axi\", \"6\": \"xx8926\", \"7\": \"debit\", \"8\": \"credit\", \"9\": \"request\", \"10\": \"money\", \"11\": \"googl\", \"12\": \"pay\", \"13\": \"upi\", \"14\": \"sm\", \"15\": \"blockupi\", \"16\": \"cust\", \"17\": \"id\", \"18\": \"ist\", \"19\": \"info\", \"20\": \"account\", \"21\": \"app\", \"22\": \"approv\", \"23\": \"mandat\", \"24\": \"tata\", \"25\": \"technolog\", \"26\": \"li\", \"27\": \"1500000\", \"28\": \"refund\", \"29\": \"success\", \"30\": \"50000\", \"31\": \"121023\", \"32\": \"200219\", \"33\": \"upip2a328546155288anurag\", \"34\": \"jain\", \"35\": \"01351860002\", \"36\": \"10900\", \"37\": \"270124\", \"38\": \"113657\", \"39\": \"upip2m6321837696198add\", \"40\": \"wallet\", \"41\": \"919951860002\", \"42\": \"559000\", \"43\": \"091123\", \"44\": \"115928\", \"45\": \"upip2a334365332111anurag\", \"46\": \"jainaxi\", \"47\": \"21635\", \"48\": \"060124\", \"49\": \"073216\", \"50\": \"neftcms333334641nextbil\", \"51\": \"avl\", \"52\": \"bal\", \"53\": \"3347822\", \"54\": \"jpb\", \"55\": \"xxxx0956\", \"56\": \"rs2500\", \"57\": \"25aug2023\", \"58\": \"current\", \"59\": \"balanc\", \"60\": \"rs25\", \"61\": \"irctc\", \"62\": \"cf\", \"63\": \"103360\", \"64\": \"receiv\", \"65\": \"collect\", \"66\": \"log\", \"67\": \"author\", \"68\": \"sourav\", \"69\": \"chandra\", \"70\": \"dey\", \"71\": \"3100\", \"72\": \"flipkart\", \"73\": \"process\", \"74\": \"rs\", \"75\": \"2370\", \"76\": \"favoru\", \"77\": \"household\", \"78\": \"wrap\", \"79\": \"transfer\", \"80\": \"oct\", \"81\": \"04\", \"82\": \"2023\", \"83\": \"creat\", \"84\": \"toward\", \"85\": \"fund\", \"86\": \"block\", \"87\": \"12e5d61d2ac145738241fbf117bb295cokaxi\"}", "word_index": "{\"<OOV>\": 1, \"inr\": 2, \"bank\": 3, \"ac\": 4, \"axi\": 5, \"xx8926\": 6, \"debit\": 7, \"credit\": 8, \"request\": 9, \"money\": 10, \"googl\": 11, \"pay\": 12, \"upi\": 13, \"sm\": 14, \"blockupi\": 15, \"cust\": 16, \"id\": 17, \"ist\": 18, \"info\": 19, \"account\": 20, \"app\": 21, \"approv\": 22, \"mandat\": 23, \"tata\": 24, \"technolog\": 25, \"li\": 26, \"1500000\": 27, \"refund\": 28, \"success\": 29, \"50000\": 30, \"121023\": 31, \"200219\": 32, \"upip2a328546155288anurag\": 33, \"jain\": 34, \"01351860002\": 35, \"10900\": 36, \"270124\": 37, \"113657\": 38, \"upip2m6321837696198add\": 39, \"wallet\": 40, \"919951860002\": 41, \"559000\": 42, \"091123\": 43, \"115928\": 44, \"upip2a334365332111anurag\": 45, \"jainaxi\": 46, \"21635\": 47, \"060124\": 48, \"073216\": 49, \"neftcms333334641nextbil\": 50, \"avl\": 51, \"bal\": 52, \"3347822\": 53, \"jpb\": 54, \"xxxx0956\": 55, \"rs2500\": 56, \"25aug2023\": 57, \"current\": 58, \"balanc\": 59, \"rs25\": 60, \"irctc\": 61, \"cf\": 62, \"103360\": 63, \"receiv\": 64, \"collect\": 65, \"log\": 66, \"author\": 67, \"sourav\": 68, \"chandra\": 69, \"dey\": 70, \"3100\": 71, \"flipkart\": 72, \"process\": 73, \"rs\": 74, \"2370\": 75, \"favoru\": 76, \"household\": 77, \"wrap\": 78, \"transfer\": 79, \"oct\": 80, \"04\": 81, \"2023\": 82, \"creat\": 83, \"toward\": 84, \"fund\": 85, \"block\": 86, \"12e5d61d2ac145738241fbf117bb295cokaxi\": 87}"}

trained_model.keras

687 KB
Binary file not shown.
