Berom0227 committed on
Commit
5cfdabd
·
verified ·
1 Parent(s): 36459b8

Upload scripts/sample_atomic_commites.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. scripts/sample_atomic_commites.py +7 -11
scripts/sample_atomic_commites.py CHANGED
@@ -13,7 +13,7 @@ from typing import Dict, List, Set, Tuple
13
  RANDOM_SEED: int = 42
14
 
15
  # Processing configuration
16
- CONVENTIONAL_COMMIT_TYPES: List[str] = ["feat", "fix", "refactor", "test", "docs", "build", "cicd"]
17
  SAMPLES_PER_TYPE: int = 50
18
  TARGET_TOKEN_LIMIT: int = 12288 # 16384 - 4096
19
  ENCODING_MODEL: str = "cl100k_base" # GPT-4 encoding
@@ -30,9 +30,6 @@ OUTPUT_COLUMNS: List[str] = [
30
  COLUMN_SHA,
31
  ]
32
 
33
- # Data transformation constants
34
- CI_TO_CICD_REPLACEMENT: str = "cicd"
35
-
36
  # File paths
37
  CCS_SOURCE_PATH: str = "data/CCS Dataset.csv"
38
  SAMPLED_CSV_PATH: str = "data/sampled_ccs_dataset.csv"
@@ -41,14 +38,13 @@ DIFF_OUTPUT_DIR: str = "data/types"
41
 
42
 
43
  def normalize_dataset(df: pd.DataFrame) -> pd.DataFrame:
44
- """Normalize CI commit type labels to CICD for consistent categorization."""
45
  df[COLUMN_ANNOTATED_TYPE] = (
46
  df[COLUMN_ANNOTATED_TYPE]
47
  .str.lower()
48
  .str.strip()
49
- .replace("ci", CI_TO_CICD_REPLACEMENT)
50
  )
51
- print("Applied CI -> CICD normalization")
52
  return df
53
 
54
 
@@ -226,7 +222,7 @@ def main() -> None:
226
  1. Load dataset, existing SHAs and type counts for deduplication and sampling
227
  2. Remove excluded commits by SHA
228
  3. Remove existing commits to prevent duplicates
229
- 4. Normalize CI commit types to CICD
230
  5. Filter commits exceeding token limits
231
  6. Sample needed amounts per type to reach target
232
  7. Save results and extract individual diff files (new samples only)
@@ -248,9 +244,9 @@ def main() -> None:
248
  print("\nStep 3: Removing existing commits")
249
  ccs_df = remove_existing_commits(ccs_df, existing_shas)
250
 
251
- # Step 4: Apply CI->CICD normalization
252
- # print("\nStep 4: Applying CI->CICD normalization")
253
- # ccs_df = normalize_dataset(ccs_df)
254
 
255
 
256
  # Step 5: Apply token-based filtering
 
13
  RANDOM_SEED: int = 42
14
 
15
  # Processing configuration
16
+ CONVENTIONAL_COMMIT_TYPES: List[str] = ["feat", "fix", "refactor", "test", "docs", "build", "ci"]
17
  SAMPLES_PER_TYPE: int = 50
18
  TARGET_TOKEN_LIMIT: int = 12288 # 16384 - 4096
19
  ENCODING_MODEL: str = "cl100k_base" # GPT-4 encoding
 
30
  COLUMN_SHA,
31
  ]
32
 
 
 
 
33
  # File paths
34
  CCS_SOURCE_PATH: str = "data/CCS Dataset.csv"
35
  SAMPLED_CSV_PATH: str = "data/sampled_ccs_dataset.csv"
 
38
 
39
 
40
  def normalize_dataset(df: pd.DataFrame) -> pd.DataFrame:
41
+ """Normalize commit type labels for consistent categorization."""
42
  df[COLUMN_ANNOTATED_TYPE] = (
43
  df[COLUMN_ANNOTATED_TYPE]
44
  .str.lower()
45
  .str.strip()
 
46
  )
47
+ print("Applied normalization")
48
  return df
49
 
50
 
 
222
  1. Load dataset, existing SHAs and type counts for deduplication and sampling
223
  2. Remove excluded commits by SHA
224
  3. Remove existing commits to prevent duplicates
225
+ 4. Normalize commit types
226
  5. Filter commits exceeding token limits
227
  6. Sample needed amounts per type to reach target
228
  7. Save results and extract individual diff files (new samples only)
 
244
  print("\nStep 3: Removing existing commits")
245
  ccs_df = remove_existing_commits(ccs_df, existing_shas)
246
 
247
+ # Step 4: Apply normalized types
248
+ print("\nStep 4: Applying normalization")
249
+ ccs_df = normalize_dataset(ccs_df)
250
 
251
 
252
  # Step 5: Apply token-based filtering