Every line of the 'sklearn cosine similarity' code snippets below is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.
import math

def square_rooted(x):
    # helper from the snippet's original source, reconstructed here so the
    # snippet runs standalone: the Euclidean norm of x
    return math.sqrt(sum(a * a for a in x))

def cosine_similarity(x, y):
    numerator = sum(a * b for a, b in zip(x, y))
    denominator = square_rooted(x) * square_rooted(y)
    try:
        return numerator / float(denominator)
    except ZeroDivisionError:
        # a zero vector makes the similarity undefined; treat it as 0.0
        return 0.0
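A quick usage sketch (these calls are illustrative, not from the original source): parallel vectors score 1.0 and orthogonal vectors score 0.0.

print(cosine_similarity([1, 2, 3], [2, 4, 6]))  # 1.0 (parallel)
print(cosine_similarity([1, 0], [0, 1]))        # 0.0 (orthogonal)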
import unittest

import pysparnn.cluster_index as ci  # import path assumed from pysparnn's README
from sklearn.feature_extraction import DictVectorizer

class ClusterIndexTest(unittest.TestCase):  # enclosing class name assumed

    def test_cosine(self):
        """Do a quick basic test for index/search functionality."""
        data = [
            'hello world',
            'oh hello there',
            'Play it',
            'Play it again Sam',
        ]

        # one bag-of-words dict per document, vectorized to a sparse matrix
        features = [dict((x, 1) for x in f.split()) for f in data]
        features = DictVectorizer().fit_transform(features)

        cluster_index = ci.ClusterIndex(features, data)

        # querying the index with its own vectors should return each document
        ret = cluster_index.search(features, k=1, k_clusters=1,
                                   return_distance=False)
        self.assertEqual([[d] for d in data], ret)
def test_cosine_identical(self):
    # self.ilist is built in the suite's setUp; comparing a record with
    # itself should yield a similarity of ~1
    cosine = CosineTextSimilarity(self.ilist)
    cosine_sim = cosine(self.ilist[0], self.ilist[0])
    self.assertAlmostEqual(cosine_sim, 1, places=5)
import math

def cosine_distance(s1, s2, k):
    """Compute the cosine similarity of the strings as k-mer vectors.

    Note: despite the name, this returns the cosine similarity, not a distance.
    """
    vec1, vec2 = to_kmer_vector(s1, s2, k)

    # dot product over the k-mers the two vectors share
    intersection = set(vec1.keys()) & set(vec2.keys())
    numerator = sum(vec1[x] * vec2[x] for x in intersection)

    # product of the two vector norms
    sum1 = sum(count ** 2 for count in vec1.values())
    sum2 = sum(count ** 2 for count in vec2.values())
    denominator = math.sqrt(sum1) * math.sqrt(sum2)
    if not denominator:
        return 0.0
    return float(numerator) / denominator
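The snippet relies on a to_kmer_vector helper defined elsewhere in its source. A minimal sketch of what such a helper might look like, assuming it returns one k-mer count dict per string:

from collections import Counter

def to_kmer_vector(s1, s2, k):
    # hypothetical reconstruction: count overlapping k-mers in each string
    vec1 = Counter(s1[i:i + k] for i in range(len(s1) - k + 1))
    vec2 = Counter(s2[i:i + k] for i in range(len(s2) - k + 1))
    return vec1, vec2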
import theano.tensor as T

def debug_print(variable, name):
    # project-local logging helper in the original source; pass-through stub here
    return variable

def cosine(vec1, vec2):
    vec1 = debug_print(vec1, 'vec1')
    vec2 = debug_print(vec2, 'vec2')
    # L2 norms of the two vectors
    norm_uni_l = T.sqrt((vec1 ** 2).sum())
    norm_uni_r = T.sqrt((vec2 ** 2).sum())

    dot = T.dot(vec1, vec2.T)

    simi = debug_print(dot / (norm_uni_l * norm_uni_r), 'uni-cosine')
    return simi.reshape((1, 1))
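This builds a symbolic Theano graph, so it has to be compiled before it can be evaluated. A minimal usage sketch, assuming Theano is installed:

import theano
import theano.tensor as T

v1, v2 = T.vector('v1'), T.vector('v2')
sim = theano.function([v1, v2], cosine(v1, v2))
print(sim([1.0, 0.0], [1.0, 1.0]))  # [[0.7071...]], returned as a 1x1 matrix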
@keras_export(
    'keras.losses.cosine_similarity',
    v1=[
        'keras.metrics.cosine_proximity',
        'keras.metrics.cosine',
        'keras.losses.cosine_proximity',
        'keras.losses.cosine',
        'keras.losses.cosine_similarity',
    ])
def cosine_similarity(y_true, y_pred, axis=-1):
  """Computes the cosine similarity between labels and predictions.

  Note that the result is a number between -1 and 1. When it is a negative
  number between -1 and 0, 0 indicates orthogonality and values closer to -1
  indicate greater similarity. This makes it usable as a loss function in a
  setting where you try to maximize the proximity between predictions and
  targets.

  `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.
    axis: Axis along which to determine similarity.

  Returns:
    Cosine similarity tensor.
  """
  y_true = nn.l2_normalize(y_true, axis=axis)
  y_pred = nn.l2_normalize(y_pred, axis=axis)
  return -math_ops.reduce_sum(y_true * y_pred, axis=axis)
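This is TensorFlow's internal definition; the public entry point is tf.keras.losses.cosine_similarity. A short usage sketch, assuming TensorFlow 2.x: orthogonal rows yield a loss of 0 and identical rows a loss near -1.

import tensorflow as tf

y_true = tf.constant([[0., 1.], [1., 1.]])
y_pred = tf.constant([[1., 0.], [1., 1.]])
loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=-1)
print(loss.numpy())  # ~[-0., -1.]: orthogonal row, then identical row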
import math

def CosineSimilarity(v1, v2):
  """Implements the Cosine similarity metric.

  This is the recommended metric in the LaSSI paper.

  **Arguments**:

    - two vectors (sequences of bit ids)

  **Returns**: a float.

  **Notes**:

    - the vectors must be sorted

  >>> print('%.3f' % CosineSimilarity((1, 2, 3, 4, 10), (2, 4, 6)))
  0.516
  >>> print('%.3f' % CosineSimilarity((1, 2, 2, 3, 4), (2, 2, 4, 5, 6)))
  0.714
  >>> print('%.3f' % CosineSimilarity((1, 2, 2, 3, 4), (1, 2, 2, 3, 4)))
  1.000
  >>> print('%.3f' % CosineSimilarity((1, 2, 2, 3, 4), (5, 6, 7)))
  0.000
  >>> print('%.3f' % CosineSimilarity((1, 2, 2, 3, 4), ()))
  0.000
  """
  # Dot is the module's sorted-sequence dot-product helper; see the sketch below
  d1 = Dot(v1, v1)
  d2 = Dot(v2, v2)
  denom = math.sqrt(d1 * d2)
  if not denom:
    res = 0.0
  else:
    numer = Dot(v1, v2)
    res = numer / denom
  return res
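Dot is defined alongside CosineSimilarity in its source module. A hypothetical stand-in that reproduces the doctest values above, treating each sorted sequence as a bag of bit ids:

from collections import Counter

def Dot(v1, v2):
    # hypothetical stand-in: dot product of the bit-count vectors implied by
    # two sorted sequences of bit ids (duplicates count as multiplicity)
    c1, c2 = Counter(v1), Counter(v2)
    return sum(c1[k] * c2[k] for k in c1.keys() & c2.keys())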