@online{vardhan2024embeddings,
  title={A Comprehensive Guide to Word Embeddings in NLP},
  author={Vardhan, Harsh},
  year={2024},
  url={https://medium.com/@harsh.vardhan7695/a-comprehensive-guide-to-word-embeddings-in-nlp-ee3f9e4663ed},
  publisher={Medium}
}

@online{turing2022embeddings,
  title={A Guide on Word Embeddings in NLP},
  author={{Turing}},
  year={2022},
  url={https://www.turing.com/kb/guide-on-word-embeddings-in-nlp}
}

@online{sarkar2018cbow,
  title={Implementing Deep Learning Methods and Feature Engineering for Text Data: The Continuous Bag of Words (CBOW)},
  author={Sarkar, D.},
  year={2018},
  url={https://www.kdnuggets.com/2018/04/implementing-deep-learning-methods-feature-engineering-text-data-cbow.html},
  publisher={KDnuggets}
}

@online{word2vec,
  title={word2vec},
  year={2013},
  url={https://code.google.com/archive/p/word2vec/},
  publisher={Google Code Archive}
}

@online{codeemporium2020bert,
  title={BERT Neural Network - EXPLAINED!},
  author={CodeEmporium},
  year={2020},
  url={https://www.youtube.com/watch?v=xI0HHN5XKDo},
  publisher={YouTube}
}

@article{devlin2018bert,
  title={BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding},
  author={Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
  year={2018},
  url={https://arxiv.org/abs/1810.04805},
  publisher={arXiv}
}

@book{manning2024llm,
  title={Build a Large Language Model (From Scratch)},
  author={Raschka, Sebastian},
  year={2024},
  publisher={Manning Publications},
  url={https://www.manning.com/books/build-a-large-language-model-from-scratch}
}

@online{rasbt2024llm,
  title={LLMs-from-scratch},
  author={Raschka, Sebastian},
  year={2024},
  url={https://github.com/rasbt/LLMs-from-scratch/tree/main/ch02/03_bonus_embedding-vs-matmul},
  publisher={GitHub}
}

@online{chrishayuk2024embeddings,
  title={embeddings},
  author={{chrishayuk}},
  year={2024},
  url={https://github.com/chrishayuk/embeddings/tree/main},
  publisher={GitHub}
}

@article{Mikolov_Chen_Corrado_Dean_2013,
  title={Efficient Estimation of Word Representations in Vector Space},
  author={Mikolov, Tomas and Chen, Kai and Corrado, Greg and Dean, Jeffrey},
  year={2013},
  url={https://arxiv.org/abs/1301.3781},
  publisher={arXiv}
}