"""Compute TF-IDF features for a comma-separated token string.

Reconstructed from a mangled paste: the original fused each statement onto
its comment line (so nothing executed), duplicated the whole snippet, and
interleaved raw data (`in3x,net,watch,...`) that made the file a syntax
error.  The intent was the classic scikit-learn pipeline
(CountVectorizer -> TfidfTransformer) on a one-document corpus; this
reconstruction implements the same computation with the standard library
only, matching sklearn's documented defaults:

* lowercase, tokens of two or more word characters (``\\b\\w\\w+\\b``),
* vocabulary in sorted order,
* smoothed idf ``ln((1 + n_docs) / (1 + df)) + 1``,
* L2-normalized rows.
"""

import math
import re
from collections import Counter

# Matches CountVectorizer's default token_pattern r"(?u)\b\w\w+\b":
# runs of 2+ word characters; single-character tokens are dropped.
_TOKEN_RE = re.compile(r"\b\w\w+\b")


def tfidf_vectorize(documents):
    """Return ``(vocabulary, rows)`` of TF-IDF features for *documents*.

    Parameters
    ----------
    documents : list[str]
        Raw text documents.

    Returns
    -------
    tuple[list[str], list[list[float]]]
        ``vocabulary`` is the sorted list of distinct tokens; ``rows[i]``
        is the L2-normalized TF-IDF vector of ``documents[i]``, aligned
        with ``vocabulary``.  An all-zero document yields an all-zero row
        (no division by a zero norm).
    """
    tokenized = [_TOKEN_RE.findall(doc.lower()) for doc in documents]
    vocabulary = sorted({tok for toks in tokenized for tok in toks})

    n_docs = len(documents)
    # Document frequency: number of documents containing each term.
    df = {term: sum(1 for toks in tokenized if term in toks) for term in vocabulary}
    # Smoothed idf, as in sklearn's TfidfTransformer(smooth_idf=True).
    idf = {term: math.log((1 + n_docs) / (1 + df[term])) + 1.0 for term in vocabulary}

    rows = []
    for toks in tokenized:
        counts = Counter(toks)
        row = [counts[term] * idf[term] for term in vocabulary]
        norm = math.sqrt(sum(v * v for v in row))
        if norm:  # guard: empty document -> keep the zero vector
            row = [v / norm for v in row]
        rows.append(row)
    return vocabulary, rows


# Your data (preserved from the original snippet).
text = "in3x,net,watch,14zwhrd6,dildo,18"
tokens = text.split(",")

# One-document corpus, tokens joined by spaces so the tokenizer splits them.
data = [" ".join(tokens)]

vocabulary, tfidf_features = tfidf_vectorize(data)