
Commit 4da00aa
Merge pull request #2 from textiq/collectionBased
Added list factory and mapDb
2 parents 885ae26 + 57ffeeb

19 files changed: +1361 / -875 lines

build.gradle

Lines changed: 3 additions & 4 deletions

@@ -155,6 +155,9 @@ dependencies {
     jmh group: 'org.apache.commons', name: 'commons-compress', version: 'latest.release'
     jmh 'org.openjdk.jmh:jmh-generator-annprocess:latest.release' // for IntelliJ
 
+    // https://mvnrepository.com/artifact/org.mapdb/mapdb
+    compile group: 'org.mapdb', name: 'mapdb', version: '3.0.8'
+
     components.all { ComponentMetadataDetails details ->
         details.statusScheme = ['candidate', 'release']
         if (details.id.version =~ /(?i).+([-.])(CANDIDATE|RC|BETA|ALPHA).*/) {
@@ -164,7 +167,3 @@ dependencies {
         }
     }
 }
-
-dependencyLocking {
-    lockAllConfigurations()
-}
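
The new dependency pulls in MapDB, an embedded database whose collections can live off-heap or on disk behind ordinary java.util interfaces. A minimal sketch of the MapDB 3.x calls a disk-backed list factory could build on (illustration only, not code from this commit):

import java.util.List;

import org.mapdb.DB;
import org.mapdb.DBMaker;
import org.mapdb.Serializer;

// Illustration only, not part of this commit: a MapDB-backed List lets
// training data spill out of the Java heap.
public class MapDbListDemo {

    public static void main(String[] args) {
        // memoryDB() keeps data off-heap; DBMaker.tempFileDB() or
        // DBMaker.fileDB(path) would back it with a file instead.
        DB db = DBMaker.memoryDB().make();

        // indexTreeList exposes a java.util.List stored in the DB.
        List<String> rows = db.indexTreeList("rows", Serializer.STRING).createOrOpen();

        rows.add("1:0.5 3:1.0");
        System.out.println(rows.get(0));

        db.close();
    }
}

For user-defined element types, MapDB's generic fallback Serializer.JAVA uses plain Java serialization, which is presumably why Feature gains Serializable later in this diff.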

src/jmh/java/de/bwaldvogel/liblinear/LinearBenchmark.java

Lines changed: 4 additions & 3 deletions

@@ -5,6 +5,7 @@
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.ArrayList;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
@@ -35,12 +36,12 @@ public class LinearBenchmark {
     public void readProblem(DatasetParameters datasetParameters) throws Exception {
         Path trainingFile = getTrainingFile(datasetParameters.dataset);
         try (InputStream inputStream = getInputStream(trainingFile)) {
-            Train.readProblem(inputStream, -1);
+            Train.readProblem(new MemoryListFactory(), inputStream, -1);
         }
     }
 
     @Benchmark
-    public void train(BenchmarkParameters benchmarkParameters) {
+    public void train(BenchmarkParameters benchmarkParameters) throws IllegalAccessException, InstantiationException {
         Linear.disableDebugOutput();
         Linear.train(benchmarkParameters.problem, new Parameter(benchmarkParameters.solverType, 1, 1e-3));
     }
@@ -65,7 +66,7 @@ public static class BenchmarkParameters {
     public void loadDataset() throws Exception {
         Path trainingFile = getTrainingFile(dataset);
         try (InputStream inputStream = getInputStream(trainingFile)) {
-            problem = Train.readProblem(inputStream, -1);
+            problem = Train.readProblem(new MemoryListFactory(), inputStream, -1);
         }
     }
 
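
Train.readProblem now takes a list factory as its first argument, and the benchmark passes the in-memory implementation. The factory types themselves are not shown in this diff; the sketch below is an assumed shape implied by the call sites (the ListFactory interface and its newList() method are guesses). The added throws clause on train() hints that factories may be instantiated reflectively, e.g. via Class.newInstance().

import java.util.ArrayList;
import java.util.List;

// Sketch of the abstraction implied by the new readProblem signature.
// Only the MemoryListFactory name appears in this diff; the interface
// below is a guess at its contract.
interface ListFactory {

    // Create an empty list for one kind of training data (rows, labels, ...).
    <T> List<T> newList();
}

class MemoryListFactory implements ListFactory {

    @Override
    public <T> List<T> newList() {
        return new ArrayList<>(); // plain heap-backed storage, as before
    }
}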

src/main/java/de/bwaldvogel/liblinear/Feature.java

Lines changed: 3 additions & 1 deletion

@@ -1,9 +1,11 @@
 package de.bwaldvogel.liblinear;
 
+import java.io.Serializable;
+
 /**
  * @since 1.9
  */
-public interface Feature {
+public interface Feature extends Serializable {
 
     int getIndex();
 
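
Making Feature extend Serializable lets feature rows pass through Java serialization, which MapDB's generic serializer relies on. A quick round-trip sanity check, assuming liblinear's concrete FeatureNode implementation (demo code, not part of the commit):

package de.bwaldvogel.liblinear;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

// Sanity check, not commit code: a Feature survives Java serialization
// because the interface now extends Serializable.
public class FeatureSerializationDemo {

    public static void main(String[] args) throws Exception {
        Feature original = new FeatureNode(3, 0.25);

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(original); // threw NotSerializableException before this change
        }

        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            Feature copy = (Feature) in.readObject();
            System.out.println(copy.getIndex() + ":" + copy.getValue()); // 3:0.25
        }
    }
}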
src/main/java/de/bwaldvogel/liblinear/FeatureVector.java

Lines changed: 20 additions & 0 deletions

@@ -0,0 +1,20 @@
+package de.bwaldvogel.liblinear;
+
+import java.io.Serializable;
+
+/**
+ * Wrapper around feature array.
+ */
+
+public class FeatureVector implements Serializable {
+    public Feature[] getFeatures() {
+        return features;
+    }
+
+    private final Feature[] features;
+
+    public FeatureVector(Feature[] features) {
+        this.features = features;
+    }
+
+}
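
FeatureVector wraps one row of features as a single serializable value, so a whole training instance can be stored as one entry in a serialized collection. Hypothetical usage (this demo class is not part of the commit):

package de.bwaldvogel.liblinear;

// Hypothetical usage, not commit code: wrap one training row so it can
// be stored as a single value, e.g. in a MapDB-backed list.
public class FeatureVectorDemo {

    public static void main(String[] args) {
        Feature[] row = { new FeatureNode(1, 1.0), new FeatureNode(4, 0.5) };
        FeatureVector vector = new FeatureVector(row);
        System.out.println(vector.getFeatures().length); // 2
    }
}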

src/main/java/de/bwaldvogel/liblinear/L2R_ErmFunction.java

Lines changed: 7 additions & 9 deletions

@@ -12,7 +12,7 @@ abstract class L2R_ErmFunction implements Function {
     final boolean regularize_bias;
 
     L2R_ErmFunction(Problem prob, Parameter parameter, double[] C) {
-        int l = prob.l;
+        int l = prob.getL();
 
         this.prob = prob;
 
@@ -24,23 +24,21 @@ abstract class L2R_ErmFunction implements Function {
 
     void Xv(double[] v, double[] Xv) {
         int i;
-        int l = prob.l;
-        Feature[][] x = prob.x;
+        int l = prob.getL();
 
         for (i = 0; i < l; i++)
-            Xv[i] = SparseOperator.dot(v, x[i]);
+            Xv[i] = SparseOperator.dot(v, prob.getX(i));
     }
 
     void XTv(double[] v, double[] XTv) {
-        int l = prob.l;
+        int l = prob.getL();
         int w_size = get_nr_variable();
-        Feature[][] x = prob.x;
 
         for (int i = 0; i < w_size; i++)
            XTv[i] = 0;
 
         for (int i = 0; i < l; i++) {
-            SparseOperator.axpy(v[i], x[i], XTv);
+            SparseOperator.axpy(v[i], prob.getX(i), XTv);
         }
     }
 
@@ -50,7 +48,7 @@ void XTv(double[] v, double[] XTv) {
     public double fun(double[] w) {
         int i;
         double f = 0;
-        int l = prob.l;
+        int l = prob.getL();
         int w_size = get_nr_variable();
 
         wTw = 0;
@@ -77,7 +75,7 @@ public int get_nr_variable() {
     @Override
     public double linesearch_and_update(double[] w, double[] s, MutableDouble f, double[] g, double alpha) {
         int i;
-        int l = prob.l;
+        int l = prob.getL();
         double sTs = 0;
         double wTs = 0;
         double gTs = 0;
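
From here on the solver changes are mechanical: every prob.l becomes prob.getL() and every prob.x[i] becomes prob.getX(i), so the solvers stop assuming the dataset is a Feature[][] held entirely in memory. Problem itself is not part of this diff; the sketch below is one assumed shape of the accessors these call sites imply (the rows field and its type are guesses):

package de.bwaldvogel.liblinear;

import java.util.List;

// Assumed shape of Problem after this commit; the diff shows only the
// call sites. Routing access through getL()/getX(i) lets the rows live
// in any List implementation, including a MapDB-backed one.
public class Problem {

    public double[] y;  // target values (still accessed as a field in this diff)
    public int n;       // number of features
    public double bias;

    private List<FeatureVector> rows; // assumed backing storage

    public int getL() {
        return rows.size(); // number of training instances
    }

    public Feature[] getX(int i) {
        return rows.get(i).getFeatures(); // sparse features of instance i
    }
}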

src/main/java/de/bwaldvogel/liblinear/L2R_L2_SvcFunction.java

Lines changed: 5 additions & 8 deletions

@@ -7,7 +7,7 @@ class L2R_L2_SvcFunction extends L2R_ErmFunction {
 
     public L2R_L2_SvcFunction(Problem prob, Parameter param, double[] C) {
         super(prob, param, C);
-        I = new int[prob.l];
+        I = new int[prob.getL()];
     }
 
     @Override
@@ -23,7 +23,7 @@ protected double C_times_loss(int i, double wx_i) {
     public void grad(double[] w, double[] g) {
         int i;
         double[] y = prob.y;
-        int l = prob.l;
+        int l = prob.getL();
         int w_size = get_nr_variable();
 
         sizeI = 0;
@@ -46,7 +46,6 @@ public void grad(double[] w, double[] g) {
     @Override
     public void get_diag_preconditioner(double[] M) {
         int w_size = get_nr_variable();
-        Feature[][] x = prob.x;
 
         for (int i = 0; i < w_size; i++)
             M[i] = 1;
@@ -55,7 +54,7 @@ public void get_diag_preconditioner(double[] M) {
 
         for (int i = 0; i < sizeI; i++) {
             int idx = I[i];
-            for (Feature s : x[idx]) {
+            for (Feature s : prob.getX(idx)) {
                 M[s.getIndex() - 1] += s.getValue() * s.getValue() * C[idx] * 2;
             }
         }
@@ -65,12 +64,11 @@ public void get_diag_preconditioner(double[] M) {
     public void Hv(double[] s, double[] Hs) {
         int i;
         int w_size = get_nr_variable();
-        Feature[][] x = prob.x;
 
         for (i = 0; i < w_size; i++)
             Hs[i] = 0;
         for (i = 0; i < sizeI; i++) {
-            Feature[] xi = x[I[i]];
+            Feature[] xi = prob.getX(I[i]);
             double xTs = SparseOperator.dot(s, xi);
             xTs = C[I[i]] * xTs;
 
@@ -85,12 +83,11 @@ public void Hv(double[] s, double[] Hs) {
     protected void subXTv(double[] v, double[] XTv) {
         int i;
         int w_size = get_nr_variable();
-        Feature[][] x = prob.x;
 
         for (i = 0; i < w_size; i++)
             XTv[i] = 0;
         for (i = 0; i < sizeI; i++)
-            SparseOperator.axpy(v[i], x[I[i]], XTv);
+            SparseOperator.axpy(v[i], prob.getX(I[i]), XTv);
     }
 
 }

src/main/java/de/bwaldvogel/liblinear/L2R_L2_SvrFunction.java

Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@ else if (d > p)
     public void grad(double[] w, double[] g) {
         int i;
         double[] y = prob.y;
-        int l = prob.l;
+        int l = prob.getL();
         int w_size = get_nr_variable();
         double d;
 
src/main/java/de/bwaldvogel/liblinear/L2R_LrFunction.java

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ class L2R_LrFunction extends L2R_ErmFunction {
66

77
L2R_LrFunction(Problem prob, Parameter param, double[] C) {
88
super(prob, param, C);
9-
int l = prob.l;
9+
int l = prob.getL();
1010
D = new double[l];
1111
}
1212

@@ -23,7 +23,7 @@ protected double C_times_loss(int i, double wx_i) {
2323
public void grad(double[] w, double[] g) {
2424
int i;
2525
double[] y = prob.y;
26-
int l = prob.l;
26+
int l = prob.getL();
2727
int w_size = get_nr_variable();
2828

2929
for (i = 0; i < l; i++) {
@@ -41,17 +41,16 @@ public void grad(double[] w, double[] g) {
4141

4242
@Override
4343
public void get_diag_preconditioner(double[] M) {
44-
int l = prob.l;
44+
int l = prob.getL();
4545
int w_size = get_nr_variable();
46-
Feature[][] x = prob.x;
4746

4847
for (int i = 0; i < w_size; i++)
4948
M[i] = 1;
5049
if (!regularize_bias)
5150
M[w_size - 1] = 0;
5251

5352
for (int i = 0; i < l; i++) {
54-
for (Feature xi : x[i]) {
53+
for (Feature xi : prob.getX(i)) {
5554
M[xi.getIndex() - 1] += xi.getValue() * xi.getValue() * C[i] * D[i];
5655
}
5756
}
@@ -60,14 +59,13 @@ public void get_diag_preconditioner(double[] M) {
6059
@Override
6160
public void Hv(double[] s, double[] Hs) {
6261
int i;
63-
int l = prob.l;
62+
int l = prob.getL();
6463
int w_size = get_nr_variable();
65-
Feature[][] x = prob.x;
6664

6765
for (i = 0; i < w_size; i++)
6866
Hs[i] = 0;
6967
for (i = 0; i < l; i++) {
70-
Feature[] xi = x[i];
68+
Feature[] xi = prob.getX(i);
7169
double xTs = SparseOperator.dot(s, xi);
7270

7371
xTs = C[i] * D[i] * xTs;
