"""The dataset corresponds to roughly 10M random users who visited the ShareChat + Moj app over three months.
We have sampled each user's activity to generate 10 impressions corresponding to each user.
Our target variable is whether there was an install for an app by the user or not.
"""
|
|
|
|
| import csv |
| import json |
| import os |
| import glob |
| import polars as pl |
|
|
| import datasets |
|
|
|
|
# BibTeX citation for the dataset paper (RecSys Challenge 2023).
_CITATION = """\
@incollection{agrawal2023recsys,
title={RecSys Challenge 2023 Dataset: Ads Recommendations in Online Advertising},
author={Agrawal, Rahul and Brahme, Sarang and Maitra, Sourav and Srivastava, Abhishek and Irissappane, Athirai and Liu, Yong and Kalloori, Saikishore},
booktitle={Proceedings of the Recommender Systems Challenge 2023},
pages={1--3},
year={2023}
}
"""


# Human-readable summary shown on the dataset card.
_DESCRIPTION = """\
The dataset corresponds to roughly 10M random users who visited the ShareChat + Moj app over three months.
We have sampled each user's activity to generate 10 impressions corresponding to each user.
Our target variable is whether there was an install for an app by the user or not.
"""


# Official challenge homepage.
_HOMEPAGE = "https://www.recsyschallenge.com/2023/"


# No license string was provided for this dataset; left empty deliberately.
_LICENSE = ""


# Download URL for the raw data archive, keyed by builder-config name.
_URLS = {
    "first_domain": "https://cdn.sharechat.com/2a161f8e_1679936280892_sc.zip",
}
|
|
|
|
class Sharechat(datasets.ArrowBasedBuilder):
    """Builder for the RecSys Challenge 2023 dataset (ShareChat + Moj app installs)."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="first_domain",
            version=VERSION,
            description="This part of my dataset covers a first domain",
        ),
    ]

    DEFAULT_CONFIG_NAME = "first_domain"

    def _info(self):
        """Return dataset metadata, including the typed feature schema.

        Column layout as encoded below:
          - ``f_0``: row id (int64)
          - ``f_1``: day index (int8)
          - ``f_2`` .. ``f_32``: categorical features (int32)
          - ``f_33`` .. ``f_41``: binary features (int8)
          - ``f_42`` .. ``f_79``: dense features (float32)
          - ``is_clicked`` / ``is_installed``: targets (int8)

        Raises:
            NotImplementedError: for any config other than ``first_domain``.
        """
        if self.config.name == "first_domain":
            id_columns = [("f_0", datasets.Value("int64"))]
            time_columns = [("f_1", datasets.Value("int8"))]
            cat_columns = [(f"f_{i}", datasets.Value("int32")) for i in range(2, 33)]
            binary_columns = [(f"f_{i}", datasets.Value("int8")) for i in range(33, 42)]
            dense_columns = [(f"f_{i}", datasets.Value("float")) for i in range(42, 80)]
            other_columns = [("is_clicked", datasets.Value("int8"))]
            label_columns = [("is_installed", datasets.Value("int8"))]
            all_columns = (
                id_columns + time_columns + cat_columns + binary_columns
                + dense_columns + other_columns + label_columns
            )
            features = datasets.Features(dict(all_columns))
        else:
            raise NotImplementedError("This configuration is not implemented yet")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and define the train/val/test splits.

        All three splits read the same labeled train CSV files; they are
        partitioned by the day column ``f_1``:
        train = days 45-64, validation = day 65, test = day 66.
        """
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)

        # Polars dtypes mirroring the datasets.Features schema in _info().
        id_columns = [("f_0", pl.Int64)]
        time_columns = [("f_1", pl.Int8)]
        cat_columns = [(f"f_{i}", pl.Int32) for i in range(2, 33)]
        binary_columns = [(f"f_{i}", pl.Int8) for i in range(33, 42)]
        dense_columns = [(f"f_{i}", pl.Float32) for i in range(42, 80)]
        other_columns = [("is_clicked", pl.Int8)]
        label_columns = [("is_installed", pl.Int8)]
        all_columns = (
            id_columns + time_columns + cat_columns + binary_columns
            + dense_columns + other_columns + label_columns
        )
        # Stored on the builder instance; consumed later by _generate_tables().
        self.dtypes_dict = dict(all_columns)

        # Every split reads the same file list, so glob once instead of three times.
        train_files = sorted(
            glob.glob(os.path.join(data_dir, "sharechat_recsys2023_data", "train", "*.csv"))
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_files, "date_start": 45, "date_end": 64},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": train_files, "date_start": 65, "date_end": 65},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths": train_files, "date_start": 66, "date_end": 66},
            ),
        ]

    def _generate_tables(self, filepaths, date_start, date_end):
        """Yield ``(key, pyarrow.Table)`` pairs for rows whose day is in range.

        Each tab-separated CSV is lazily scanned with polars, filtered to
        ``date_start <= f_1 <= date_end`` (inclusive per polars' default
        ``is_between``), sorted by day, and the day column is reduced to
        day-of-week via ``f_1 % 7``.

        Args:
            filepaths: CSV paths produced by ``_split_generators``.
            date_start: first day index to keep (inclusive).
            date_end: last day index to keep (inclusive).

        Raises:
            NotImplementedError: for any config other than ``first_domain``.
        """
        # `file_idx` rather than `id`: the original shadowed the builtin.
        for file_idx, filepath in enumerate(filepaths):
            if self.config.name == "first_domain":
                pa_table = (
                    pl.scan_csv(filepath, separator="\t", dtypes=self.dtypes_dict)
                    .filter(pl.col("f_1").is_between(date_start, date_end))
                    .sort("f_1", descending=False)
                    .with_columns(pl.col("f_1").mod(7).alias("f_1"))
                    .collect()
                    .to_arrow()
                )
                yield file_idx, pa_table
            else:
                raise NotImplementedError
|
|
|
|
|
|
| |
| |
| |
| |
| |
| |
|
|