/****************************************************************
 * Licensed to the Apache Software Foundation (ASF) under one   *
 * or more contributor license agreements.  See the NOTICE file *
 * distributed with this work for additional information        *
 * regarding copyright ownership.  The ASF licenses this file   *
 * to you under the Apache License, Version 2.0 (the            *
 * "License"); you may not use this file except in compliance   *
 * with the License.  You may obtain a copy of the License at   *
 *                                                              *
 *   http://www.apache.org/licenses/LICENSE-2.0                 *
 *                                                              *
 * Unless required by applicable law or agreed to in writing,   *
 * software distributed under the License is distributed on an  *
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY       *
 * KIND, either express or implied.  See the License for the    *
 * specific language governing permissions and limitations      *
 * under the License.                                           *
 ****************************************************************/
package cjames;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ColumnPath;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.InvalidRequestException;
import org.apache.cassandra.thrift.NotFoundException;
import org.apache.cassandra.thrift.TBinaryProtocol;
import org.apache.cassandra.thrift.TimedOutException;
import org.apache.cassandra.thrift.UnavailableException;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
/**
 * Provide an {@link OutputStream} which will write to a row. The written data
 * is split into chunks of the given chunkSize. Each chunk gets written to its
 * own column, whose name is derived from the base column name plus the chunk
 * number (starting at 1).
 *
 * This implementation is not thread-safe!
 * Based on Hector implementation for Cassandra.
 * https://github.com/rantav/hector/blob/master/core/src/main/java/me/prettyprint/cassandra/io/ChunkOutputStream.java
 */
public class ChunkOutputStream extends OutputStream {

    private final String cf;
    private final String cn;
    private final String key;
    private byte[] chunk;
    private final String keyspace;
    private int pos;
    private long chunkPos = 1;
    
    public ChunkOutputStream(String keyspace,String cf, String cn, String key,int chunkSize) {
        this.key = key;
        this.cn=cn;
        this.cf = cf;
        this.keyspace=keyspace;
        this.chunk = new byte[chunkSize];
    }

    public ChunkOutputStream(ByteBuffer keyspace,ByteBuffer cf, ByteBuffer cn, ByteBuffer key,int chunksize) throws CharacterCodingException {
        
        this(ByteBufferUtil.string(keyspace),ByteBufferUtil.string(cf),ByteBufferUtil.string(cn),ByteBufferUtil.string(key),chunksize);
    }
    
    
    
    /*
     * (non-Javadoc)
     *
     * @see java.io.OutputStream#write(int)
     */
    @Override
    public void write(int b) throws IOException {
        if (chunk.length - 1 == pos) {
            flush();
        }
        chunk[(int) pos++] = (byte) b;
    }

    @Override
    public void close() throws IOException {
        try {
            writeData(true);
        } catch (TTransportException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        } catch (InvalidRequestException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        } catch (TException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        } catch (NotFoundException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        } catch (UnavailableException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        } catch (TimedOutException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Trigger a flush. This will only write the content to the column if the
     * chunk size is reached
     */
    @Override
    public void flush() throws IOException {
        try {
            writeData(false);
        } catch (TTransportException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        } catch (InvalidRequestException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        } catch (TException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        } catch (NotFoundException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        } catch (UnavailableException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        } catch (TimedOutException ex) {
            Logger.getLogger(ChunkOutputStream.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Write the data to column if the configured chunk size is reached or if the
     * stream should be closed
     *
     * @param close
     * @throws IOException
     */
    private void writeData(boolean close) throws IOException, TTransportException, InvalidRequestException, TException, NotFoundException, UnavailableException, TimedOutException 
    {
        if (pos != 0 && (close || pos == chunk.length - 1)) 
        {
            //connection:
            
            TTransport transport = new TFramedTransport(new TSocket("localhost", 9160));
            TProtocol protocol = new TBinaryProtocol(transport);
            transport.open();
            Cassandra.Client client = new Cassandra.Client(protocol);
            client.set_keyspace(keyspace);
                   	
            ColumnPath path = new ColumnPath(cf);
 	    path.setColumn(ByteBufferUtil.bytes(cn));
	
             ColumnOrSuperColumn result = client.get(ByteBufferUtil.bytes(key),path,ConsistencyLevel.ONE);
 	     Column cname =  result.getColumn();
             
             if(cname != null)
             {
                 cname.setValue(chunk);
                 chunkPos++;
                 pos = 0;
           
             }
        }
    }
    
    
}