"""Split a comma-separated keyword string into tokens.

NOTE(review): the original file was a line-mangled, duplicated paste — the
`text = ...` and `tokens = ...` assignments had been fused into `#` comments
(so the script defined nothing), the snippet appeared twice, and several
lines were non-Python residue. This is the deduplicated, syntactically
valid reconstruction; no behavior beyond the evident intent was added.
"""

# CountVectorizer/TfidfTransformer are imported but not yet used below —
# presumably the next step was to vectorize `tokens`; kept as-is.
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

# Your data
text = "in3x,net,watch,14zwhrd6,dildo,18"

# Tokenize (simple split on commas)
tokens = text.split(',')