|
from dataclasses import dataclass
from typing import Optional

import fsspec
import torch
from torch.utils.data import Dataset
| 5 | + |
| 6 | +""" |
| 7 | +Adapted from https://github.com/karpathy/minGPT/blob/master/projects/chargpt/chargpt.py |
| 8 | +""" |
| 9 | + |
@dataclass
class DataConfig:
    """Configuration for building a :class:`CharDataset`.

    The first three fields default to ``None`` so an unset field is
    detectable; ``Optional`` makes the annotations type-correct (the
    originals declared ``str``/``int``/``float`` with a ``None`` default).
    """
    path: Optional[str] = None          # fsspec-compatible path/URL of the text corpus
    block_size: Optional[int] = None    # context length in characters per sample
    train_split: Optional[float] = None # fraction of the data used for training
    truncate: float = 1.0               # keep only this leading fraction of the file
| 16 | + |
class CharDataset(Dataset):
    """Character-level language-modeling dataset.

    Each sample is a pair ``(x, y)`` of ``torch.long`` tensors of length
    ``block_size``, where ``y`` is ``x`` shifted one character ahead
    (next-character prediction targets).
    """

    def __init__(self, data_cfg: "DataConfig"):
        """Load the text at ``data_cfg.path``, build the char vocabulary,
        and keep the raw corpus for on-the-fly encoding.

        ``data_cfg.truncate`` keeps only the leading fraction of the file
        (handy for quick experiments).
        """
        # Use a context manager so the file handle is closed; the previous
        # chained .open().read() leaked the open file object.
        with fsspec.open(data_cfg.path, "rb") as f:
            data = f.read().decode('utf-8')
        data = data[: int(len(data) * data_cfg.truncate)]

        # sorted() makes the vocabulary (and thus the id mapping) deterministic
        chars = sorted(set(data))
        data_size, vocab_size = len(data), len(chars)
        print('Data has %d characters, %d unique.' % (data_size, vocab_size))

        # char <-> integer-id lookup tables
        self.stoi = {ch: i for i, ch in enumerate(chars)}
        self.itos = {i: ch for i, ch in enumerate(chars)}
        self.block_size = data_cfg.block_size
        self.vocab_size = vocab_size
        self.data = data

    def __len__(self):
        # Number of valid start positions for a (block_size + 1)-char window.
        return len(self.data) - self.block_size

    def __getitem__(self, idx):
        """Return the (input, target) tensor pair for the window at ``idx``."""
        # grab a chunk of (block_size + 1) characters from the data
        chunk = self.data[idx:idx + self.block_size + 1]
        # encode every character to an integer id
        dix = [self.stoi[s] for s in chunk]
        # x drops the last char; y drops the first — y is x shifted by one
        x = torch.tensor(dix[:-1], dtype=torch.long)
        y = torch.tensor(dix[1:], dtype=torch.long)
        return x, y
0 commit comments