import numpy as np
import sys
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import Lasso 

def load(fn):
  """Load a whitespace-separated dataset from file *fn*.

  Expected format: first line holds two integers (row/column counts,
  currently unused); every following line holds the feature values and,
  as its last field, the target value.

  Returns (X, y) where X is a list of feature rows (list of floats)
  and y is a list of target floats.
  """
  X = []
  y = []
  # 'with' guarantees the file is closed even if a parse error is raised
  with open(fn) as f:
    # header with declared dimensions; kept for format compatibility,
    # but the actual data read below is driven by the file contents
    n, m = map(int, f.readline().strip().split())
    for l in f:
      its = l.strip().split()
      X.append([float(x) for x in its[:-1]])
      y.append(float(its[-1]))
  return X, y

## tuto funkciu doprogramujte
## fill in this function
def select_features(X, y):
  """Select informative features using L1 (Lasso) regularisation.

  Fits a Lasso model on (X, y); the L1 penalty drives the coefficients
  of uninformative features exactly to zero, so the features whose
  coefficients survive are the "good" ones.

  Args:
    X: list of feature rows (or 2-D array-like), one row per sample.
    y: list (or 1-D array-like) of target values, one per sample.

  Returns:
    List of integer indices of features with non-zero Lasso coefficients.
  """
  # alpha controls the strength of the L1 penalty; larger alpha selects
  # fewer features.  0.01 is the value suggested by the template.
  clf = Lasso(alpha=0.01)
  clf.fit(X, y)

  # A small tolerance instead of an exact == 0 comparison guards against
  # coefficients that are numerically tiny but not bit-exact zero.
  selected = [i for i, c in enumerate(clf.coef_) if abs(c) > 1e-9]

  # Cross-validated MSE of the fitted model, useful when tuning alpha
  # (cross_val_score returns one negated MSE per fold; negate the mean
  # to get a positive error).
  score = -np.mean(cross_val_score(clf, X, y, cv=5,
                                   scoring='neg_mean_squared_error'))

  return selected

# Entry point: the dataset file path is taken from the first CLI argument.
X, y = load(sys.argv[1])

good = select_features(X, y)
print("Good features", good)
