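""" Build train/validation splits of positive/negative word pairs per relation type
from the SemEval-2012 Task 2 "Platinum Ratings" dataset, and export them as JSONL. """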
import gzip
import json
import os
import tarfile
import zipfile
from glob import glob
from typing import Optional

import gdown
import requests


def wget(url: str, cache_dir: str = './cache', gdrive_filename: Optional[str] = None):
    """ Download a file and extract it if it is a tar/zip/gzip archive. """
    path = _wget(url, cache_dir, gdrive_filename=gdrive_filename)
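    # dispatch on the file extension; extracted archives are removed afterwards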
    if path.endswith('.tar.gz') or path.endswith('.tgz') or path.endswith('.tar'):
        mode = 'r' if path.endswith('.tar') else 'r:gz'
        with tarfile.open(path, mode) as tar:
            tar.extractall(cache_dir)
        os.remove(path)
    elif path.endswith('.zip'):
        with zipfile.ZipFile(path, 'r') as zip_ref:
            zip_ref.extractall(cache_dir)
        os.remove(path)
    elif path.endswith('.gz'):
        # plain gzip file: decompress next to the archive, then remove the archive
        with gzip.open(path, 'rb') as f:
            with open(path[:-len('.gz')], 'wb') as f_write:
                f_write.write(f.read())
        os.remove(path)


def _wget(url: str, cache_dir: str, gdrive_filename: Optional[str] = None):
    """ Download a single file from the web and return its local path. """
    os.makedirs(cache_dir, exist_ok=True)
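    # Google Drive links need gdown (it handles the confirm-token redirect); direct URLs use requests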
    if url.startswith('https://drive.google.com'):
        assert gdrive_filename is not None, 'please provide a filename for the Google Drive download'
        return gdown.download(url, f'{cache_dir}/{gdrive_filename}', quiet=False)
    filename = os.path.basename(url)
    r = requests.get(url)
    r.raise_for_status()  # fail loudly instead of caching an error page
    with open(f'{cache_dir}/{filename}', "wb") as f:
        f.write(r.content)
    return f'{cache_dir}/{filename}'


def get_data(n_sample: int = 10, v_rate: float = 0.2, n_sample_max: int = 10):
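    """ Sample the top/bottom scored pairs per relation and split them into train/validation.

    Returns ((positive_train, negative_train), (positive_valid, negative_valid)), each a dict
    mapping a relation id to a list of word-pair tuples. """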
    assert n_sample <= n_sample_max
    cache_dir = 'cache'
    os.makedirs(cache_dir, exist_ok=True)
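    # the release ships two folders: Phase2Answers (pairs + relation labels) and
    # Phase2AnswersScaled (the same pairs with scores); download once and reuse the cache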

    path_answer = f'{cache_dir}/Phase2Answers'
    path_scale = f'{cache_dir}/Phase2AnswersScaled'
    url = 'https://drive.google.com/u/0/uc?id=0BzcZKTSeYL8VYWtHVmxUR3FyUmc&export=download'
    filename = 'SemEval-2012-Platinum-Ratings.tar.gz'
    if not (os.path.exists(path_scale) and os.path.exists(path_answer)):
        wget(url, gdrive_filename=filename, cache_dir=cache_dir)
    files_answer = sorted(os.path.basename(i) for i in glob(f'{path_answer}/*.txt'))
    files_scale = sorted(os.path.basename(i) for i in glob(f'{path_scale}/*.txt'))
    assert files_answer == files_scale, f'files are not matched: {files_scale} vs {files_answer}'
    all_positive_v = {}
    all_negative_v = {}
    all_positive_t = {}
    all_negative_t = {}
    for i in files_scale:
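        # the filename suffix encodes the relation id, e.g. "...-10a.txt" -> "10/a"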
        relation_id = i.split('-')[-1].replace('.txt', '')
        relation_id = f"{relation_id[:-1]}/{relation_id[-1]}"
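        # the last column of every answer line is the relation label; check it is unique per file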
        with open(f'{path_answer}/{i}', 'r') as f:
            lines_answer = [l.replace('"', '').split('\t') for l in f.read().split('\n') if not l.startswith('#') and len(l)]
            relation_type = list(set(list(zip(*lines_answer))[-1]))
            assert len(relation_type) == 1, relation_type
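        # each scaled line is a fixed-width score (first 5 chars), a space, then the quoted pair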
        with open(f'{path_scale}/{i}', 'r') as f:
            lines_scale = [[float(l[:5]), l[6:].replace('"', '')] for l in f.read().split('\n')
                           if not l.startswith('#') and len(l)]
            lines_scale = sorted(lines_scale, key=lambda x: x[0])
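            # lowest-scored pairs (score < 0) become negatives, highest-scored (score > 0) positives;
            # each "a:b" pair string is split into a (head, tail) tuple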
            _negative = [tuple(i.split(':')) for i in list(zip(*list(filter(lambda x: x[0] < 0, lines_scale[:n_sample_max]))))[1]]
            _positive = [tuple(i.split(':')) for i in list(zip(*list(filter(lambda x: x[0] > 0, lines_scale[-n_sample_max:]))))[1]]
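            # hold out roughly v_rate of each list for validation by striding; the rest is training data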
            v_negative = _negative[::int(len(_negative) * (1 - v_rate))]
            v_positive = _positive[::int(len(_positive) * (1 - v_rate))]
            t_negative = [i for i in _negative if i not in v_negative]
            t_positive = [i for i in _positive if i not in v_positive]

            all_negative_v[relation_id] = v_negative
            all_positive_v[relation_id] = v_positive
            all_negative_t[relation_id] = t_negative[:n_sample]
            all_positive_t[relation_id] = t_positive[-n_sample:]

    return (all_positive_t, all_negative_t), (all_positive_v, all_negative_v)


if __name__ == '__main__':
    (all_positive_t, all_negative_t), (all_positive_v, all_negative_v) = get_data(n_sample=10, v_rate=0.2, n_sample_max=10)
    os.makedirs('data', exist_ok=True)

    keys = all_positive_t.keys()
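    # one JSON object per relation type (JSON Lines format)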
    with open("data/train.jsonl", "w") as f:
        for k in sorted(keys):
            f.write(json.dumps({"relation_type": k, "positives": all_positive_t[k], "negatives": all_negative_t[k]}))
            f.write("\n")

    keys = all_positive_v.keys()
    with open("data/valid.jsonl", "w") as f:
        for k in sorted(keys):
            f.write(json.dumps({"relation_type": k, "positives": all_positive_v[k], "negatives": all_negative_v[k]}))
            f.write("\n")