Pringled/cornstack-docs-tokenlearn Dataset Card

This dataset was created with Tokenlearn for training Model2Vec models on code retrieval. It contains mean token embeddings produced by nomic-ai/CodeRankEmbed, used as training targets for static embedding distillation.

The dataset contains code documents from CornStack across 6 programming languages (50,000 rows per language, 300,000 total).

Dataset Details

| Field | Value |
|---|---|
| Source | CornStack (nomic-ai) |
| Embedding model | nomic-ai/CodeRankEmbed |
| Embedding dimension | 768 |
| Languages | Python, Java, PHP, Go, JavaScript, Ruby |
| Rows per language | 50,000 |
| Total rows | 300,000 |
| Source field | document |

Source Datasets

The code documents are drawn from the CornStack corpus released by nomic-ai, with one configuration per programming language.

Dataset Structure

| Column | Type | Description |
|---|---|---|
| text | string | Truncated input text (tokenizer max length 512) |
| embedding | list[float32] | Mean token embedding from nomic-ai/CodeRankEmbed, excluding BOS/EOS tokens |

Usage

Load a single language config, or concatenate all of them:

from datasets import concatenate_datasets, load_dataset

# Load Python code documents
dataset = load_dataset("Pringled/cornstack-docs-tokenlearn", name="python")

# Load all languages and concatenate their train splits
all_langs = concatenate_datasets([
    load_dataset("Pringled/cornstack-docs-tokenlearn", name=lang)["train"]
    for lang in ["python", "java", "php", "go", "javascript", "ruby"]
])
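
The embeddings can also be pulled out as plain NumPy targets, for example for a custom distillation experiment outside the Tokenlearn pipeline. This is a minimal sketch, assuming a train split and that materialising the embedding column as a single array fits in memory:

import numpy as np
from datasets import load_dataset

# Hypothetical follow-up: build (text, target-vector) pairs from the rows.
ds = load_dataset("Pringled/cornstack-docs-tokenlearn", name="python", split="train")
texts = ds["text"]                                         # list of code documents
targets = np.asarray(ds["embedding"], dtype=np.float32)    # shape: (50_000, 768)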

Creation

The documents were featurized from CornStack using nomic-ai/CodeRankEmbed with mean token pooling (BOS/EOS excluded). Two sampling seeds (42 and 100) were used with a 10k-example streaming shuffle buffer to maximise diversity, and texts were truncated to 512 tokens.
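
For reference, the pooling step can be sketched with a plain transformers forward pass. This is an illustrative approximation rather than the actual Tokenlearn featurization code, and the loading options for CodeRankEmbed (such as trust_remote_code) are assumptions:

import torch
from transformers import AutoModel, AutoTokenizer

model_name = "nomic-ai/CodeRankEmbed"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(model_name, trust_remote_code=True).eval()

def mean_token_embedding(text):
    # Truncate to 512 tokens, run a forward pass, and average the token states,
    # dropping the first (BOS) and last (EOS) special token before the mean.
    batch = tokenizer(text, truncation=True, max_length=512, return_tensors="pt")
    with torch.no_grad():
        hidden = model(**batch).last_hidden_state[0]   # (seq_len, 768)
    return hidden[1:-1].mean(dim=0)                    # (768,)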

Library Authors

Tokenlearn was developed by the Minish Lab team, consisting of Stephan Tulkens and Thomas van Dongen.

Citation

@software{minishlab2024model2vec,
  author       = {Stephan Tulkens and {van Dongen}, Thomas},
  title        = {Model2Vec: Fast State-of-the-Art Static Embeddings},
  year         = {2024},
  publisher    = {Zenodo},
  doi          = {10.5281/zenodo.17270888},
  url          = {https://github.com/MinishLab/model2vec},
  license      = {MIT}
}